From dc50eab76b709d68175a358d6e23a5a3890764d3 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 19:39:57 +0200 Subject: Merging upstream version 6.7.7. Signed-off-by: Daniel Baumann --- drivers/net/Kconfig | 9 + drivers/net/Makefile | 2 +- drivers/net/Space.c | 6 - drivers/net/amt.c | 1 + drivers/net/appletalk/Kconfig | 102 - drivers/net/appletalk/Makefile | 7 - drivers/net/appletalk/ipddp.c | 345 -- drivers/net/appletalk/ipddp.h | 28 - drivers/net/bareudp.c | 45 +- drivers/net/bonding/bond_alb.c | 3 +- drivers/net/bonding/bond_main.c | 5 +- drivers/net/bonding/bond_netlink.c | 2 +- drivers/net/can/Kconfig | 1 + drivers/net/can/at91_can.c | 984 ++--- drivers/net/can/dev/dev.c | 41 +- drivers/net/can/dev/netlink.c | 2 +- drivers/net/can/dev/rx-offload.c | 2 +- drivers/net/can/sja1000/peak_pci.c | 2 +- drivers/net/can/sja1000/sja1000.c | 2 +- drivers/net/dsa/b53/b53_common.c | 4 +- drivers/net/dsa/b53/b53_mdio.c | 2 +- drivers/net/dsa/b53/b53_mmap.c | 6 +- drivers/net/dsa/b53/b53_srab.c | 8 +- drivers/net/dsa/bcm_sf2.c | 49 +- drivers/net/dsa/bcm_sf2.h | 2 +- drivers/net/dsa/bcm_sf2_cfp.c | 4 +- drivers/net/dsa/dsa_loop.c | 9 + drivers/net/dsa/hirschmann/hellcreek.c | 8 +- drivers/net/dsa/lan9303-core.c | 4 +- drivers/net/dsa/lantiq_gswip.c | 45 +- drivers/net/dsa/microchip/Makefile | 2 +- drivers/net/dsa/microchip/ksz8795.c | 86 +- drivers/net/dsa/microchip/ksz8795_reg.h | 21 - drivers/net/dsa/microchip/ksz9477.c | 274 ++ drivers/net/dsa/microchip/ksz9477.h | 43 + drivers/net/dsa/microchip/ksz9477_acl.c | 1436 +++++++ drivers/net/dsa/microchip/ksz9477_i2c.c | 5 +- drivers/net/dsa/microchip/ksz9477_reg.h | 20 - drivers/net/dsa/microchip/ksz9477_tc_flower.c | 281 ++ drivers/net/dsa/microchip/ksz_common.c | 645 ++- drivers/net/dsa/microchip/ksz_common.h | 42 + drivers/net/dsa/microchip/ksz_ptp.c | 2 +- drivers/net/dsa/microchip/ksz_spi.c | 5 +- drivers/net/dsa/mt7530-mmio.c | 7 +- drivers/net/dsa/mt7530.c | 35 +- drivers/net/dsa/mv88e6xxx/chip.c | 6 +- drivers/net/dsa/mv88e6xxx/chip.h | 4 +- drivers/net/dsa/mv88e6xxx/pcs-639x.c | 2 +- drivers/net/dsa/mv88e6xxx/ptp.c | 4 + drivers/net/dsa/mv88e6xxx/serdes.c | 10 +- drivers/net/dsa/mv88e6xxx/serdes.h | 8 +- drivers/net/dsa/ocelot/felix.c | 68 +- drivers/net/dsa/ocelot/felix.h | 6 +- drivers/net/dsa/ocelot/ocelot_ext.c | 8 +- drivers/net/dsa/ocelot/seville_vsc9953.c | 8 +- drivers/net/dsa/qca/qca8k-8xxx.c | 74 +- drivers/net/dsa/qca/qca8k-common.c | 7 +- drivers/net/dsa/qca/qca8k-leds.c | 6 +- drivers/net/dsa/qca/qca8k.h | 2 +- drivers/net/dsa/realtek/realtek-smi.c | 36 +- drivers/net/dsa/realtek/realtek.h | 2 +- drivers/net/dsa/realtek/rtl8365mb.c | 5 +- drivers/net/dsa/realtek/rtl8366-core.c | 8 +- drivers/net/dsa/realtek/rtl8366rb.c | 44 +- drivers/net/dsa/rzn1_a5psw.c | 8 +- drivers/net/dsa/sja1105/sja1105_clocking.c | 21 +- drivers/net/dsa/sja1105/sja1105_main.c | 4 +- drivers/net/dsa/vitesse-vsc73xx-core.c | 49 +- drivers/net/dsa/vitesse-vsc73xx-platform.c | 8 +- drivers/net/dsa/xrs700x/xrs700x.c | 30 +- drivers/net/dummy.c | 1 + drivers/net/eql.c | 1 + drivers/net/ethernet/8390/ax88796.c | 6 +- drivers/net/ethernet/8390/mcf8390.c | 5 +- drivers/net/ethernet/8390/ne.c | 5 +- drivers/net/ethernet/actions/owl-emac.c | 6 +- drivers/net/ethernet/aeroflex/greth.c | 6 +- drivers/net/ethernet/allwinner/sun4i-emac.c | 5 +- drivers/net/ethernet/altera/altera_tse.h | 2 +- drivers/net/ethernet/altera/altera_tse_main.c | 19 +- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- drivers/net/ethernet/amd/au1000_eth.c | 6 +- 
drivers/net/ethernet/amd/pds_core/adminq.c | 64 +- drivers/net/ethernet/amd/pds_core/core.c | 94 +- drivers/net/ethernet/amd/pds_core/core.h | 9 +- drivers/net/ethernet/amd/pds_core/debugfs.c | 4 + drivers/net/ethernet/amd/pds_core/dev.c | 27 +- drivers/net/ethernet/amd/pds_core/devlink.c | 34 +- drivers/net/ethernet/amd/pds_core/fw.c | 3 + drivers/net/ethernet/amd/pds_core/main.c | 74 +- drivers/net/ethernet/amd/sunlance.c | 6 +- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 48 +- drivers/net/ethernet/apm/xgene-v2/main.c | 6 +- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 21 +- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 3 +- drivers/net/ethernet/apple/macmace.c | 6 +- drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 32 +- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 74 +- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 23 +- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 23 +- drivers/net/ethernet/arc/emac_arc.c | 6 +- drivers/net/ethernet/arc/emac_rockchip.c | 5 +- drivers/net/ethernet/asix/ax88796c_ioctl.c | 2 +- drivers/net/ethernet/atheros/ag71xx.c | 8 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 13 +- drivers/net/ethernet/atheros/atlx/atl1.c | 4 +- drivers/net/ethernet/atheros/atlx/atl2.c | 2 +- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 14 +- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 3 + drivers/net/ethernet/broadcom/bcm4908_enet.c | 6 +- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 14 +- drivers/net/ethernet/broadcom/bcmsysport.c | 8 +- drivers/net/ethernet/broadcom/bgmac-platform.c | 6 +- drivers/net/ethernet/broadcom/bnxt/Makefile | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 275 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 13 + drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 95 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 678 +++- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 545 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c | 241 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h | 30 + drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 2 + drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h | 14 + drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 26 +- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 26 +- drivers/net/ethernet/broadcom/sb1250-mac.c | 6 +- drivers/net/ethernet/broadcom/tg3.c | 136 +- drivers/net/ethernet/broadcom/tg3.h | 3 + drivers/net/ethernet/brocade/bna/bfa_ioc.c | 2 +- drivers/net/ethernet/brocade/bna/bnad.h | 1 - drivers/net/ethernet/cadence/macb_main.c | 6 +- drivers/net/ethernet/calxeda/xgmac.c | 6 +- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 18 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 3 +- .../net/ethernet/cavium/liquidio/octeon_device.c | 11 +- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 5 +- drivers/net/ethernet/chelsio/cxgb3/l2t.h | 2 +- drivers/net/ethernet/chelsio/cxgb3/sge.c | 15 +- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 2 +- .../ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 2 +- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/sched.h | 2 +- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/smt.h | 2 +- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- .../chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 2 - .../chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h | 1 - .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 43 +- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 36 +- .../ethernet/chelsio/inline_crypto/chtls/chtls.h | 
1 - drivers/net/ethernet/cirrus/cs89x0.c | 5 +- drivers/net/ethernet/cirrus/ep93xx_eth.c | 8 +- drivers/net/ethernet/cirrus/mac89x0.c | 5 +- drivers/net/ethernet/cortina/gemini.c | 12 +- drivers/net/ethernet/davicom/dm9000.c | 6 +- drivers/net/ethernet/dec/tulip/tulip.h | 2 +- drivers/net/ethernet/dnet.c | 6 +- drivers/net/ethernet/engleder/tsnep_hw.h | 2 + drivers/net/ethernet/engleder/tsnep_main.c | 125 +- drivers/net/ethernet/ethoc.c | 6 +- drivers/net/ethernet/ezchip/nps_enet.c | 2 +- drivers/net/ethernet/faraday/ftgmac100.c | 5 +- drivers/net/ethernet/faraday/ftmac100.c | 5 +- drivers/net/ethernet/freescale/enetc/enetc.c | 2 +- drivers/net/ethernet/freescale/enetc/enetc.h | 2 +- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2 +- drivers/net/ethernet/freescale/fec_main.c | 169 +- drivers/net/ethernet/freescale/fman/fman_memac.c | 11 +- .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 18 +- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 10 +- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 12 +- drivers/net/ethernet/google/gve/gve_main.c | 2 +- drivers/net/ethernet/google/gve/gve_rx.c | 8 +- drivers/net/ethernet/hisilicon/hip04_eth.c | 6 +- drivers/net/ethernet/hisilicon/hisi_femac.c | 6 +- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 17 +- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 6 +- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2 +- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 5 + .../hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 1 + .../hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 2 + drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 3 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 116 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 2 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 161 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 18 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2 +- drivers/net/ethernet/hisilicon/hns_mdio.c | 5 +- drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 217 +- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 8 +- drivers/net/ethernet/i825xx/sni_82596.c | 5 +- drivers/net/ethernet/ibm/ehea/ehea_main.c | 10 +- drivers/net/ethernet/ibm/emac/core.c | 6 +- drivers/net/ethernet/ibm/emac/mal.c | 8 +- drivers/net/ethernet/ibm/emac/rgmii.c | 6 +- drivers/net/ethernet/ibm/emac/tah.c | 6 +- drivers/net/ethernet/ibm/emac/zmii.c | 6 +- drivers/net/ethernet/ibm/ibmveth.c | 2 +- drivers/net/ethernet/ibm/ibmvnic.c | 5 +- drivers/net/ethernet/intel/Kconfig | 14 + drivers/net/ethernet/intel/Makefile | 1 + drivers/net/ethernet/intel/e100.c | 2 +- drivers/net/ethernet/intel/e1000/e1000_hw.c | 1 + drivers/net/ethernet/intel/e1000/e1000_main.c | 2 +- drivers/net/ethernet/intel/e1000e/e1000.h | 20 + drivers/net/ethernet/intel/e1000e/ptp.c | 22 +- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 8 +- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 1 + drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 1 + drivers/net/ethernet/intel/i40e/Makefile | 3 +- drivers/net/ethernet/intel/i40e/i40e.h | 212 +- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 8 +- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 3 +- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 + drivers/net/ethernet/intel/i40e/i40e_alloc.h | 24 +- drivers/net/ethernet/intel/i40e/i40e_client.c | 1 - drivers/net/ethernet/intel/i40e/i40e_common.c | 70 +- 
drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4 +- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_ddp.c | 31 +- drivers/net/ethernet/intel/i40e/i40e_debug.h | 47 + drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3 +- drivers/net/ethernet/intel/i40e/i40e_devlink.c | 235 ++ drivers/net/ethernet/intel/i40e/i40e_devlink.h | 18 + drivers/net/ethernet/intel/i40e/i40e_diag.h | 5 +- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 16 +- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 16 +- drivers/net/ethernet/intel/i40e/i40e_hmc.h | 4 + drivers/net/ethernet/intel/i40e/i40e_io.h | 16 + drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 9 +- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 2 + drivers/net/ethernet/intel/i40e/i40e_main.c | 119 +- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 3 + drivers/net/ethernet/intel/i40e/i40e_osdep.h | 59 - drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12 +- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 3 +- drivers/net/ethernet/intel/i40e/i40e_register.h | 5 + drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 +- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 + drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 2 + drivers/net/ethernet/intel/i40e/i40e_type.h | 62 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 74 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5 +- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4 - drivers/net/ethernet/intel/i40e/i40e_xsk.h | 4 + drivers/net/ethernet/intel/iavf/Makefile | 2 +- drivers/net/ethernet/intel/iavf/iavf.h | 30 +- drivers/net/ethernet/intel/iavf/iavf_client.c | 578 --- drivers/net/ethernet/intel/iavf/iavf_client.h | 169 - drivers/net/ethernet/intel/iavf/iavf_common.c | 35 +- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 13 +- drivers/net/ethernet/intel/iavf/iavf_fdir.c | 1 + drivers/net/ethernet/intel/iavf/iavf_main.c | 201 +- drivers/net/ethernet/intel/iavf/iavf_prototype.h | 2 - drivers/net/ethernet/intel/iavf/iavf_txrx.c | 47 +- drivers/net/ethernet/intel/iavf/iavf_type.h | 12 - drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 40 +- drivers/net/ethernet/intel/ice/Makefile | 2 +- drivers/net/ethernet/intel/ice/ice.h | 23 +- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 325 +- drivers/net/ethernet/intel/ice/ice_common.c | 754 +++- drivers/net/ethernet/intel/ice/ice_common.h | 51 +- drivers/net/ethernet/intel/ice/ice_ddp.c | 538 ++- drivers/net/ethernet/intel/ice/ice_ddp.h | 27 +- drivers/net/ethernet/intel/ice/ice_devids.h | 10 +- drivers/net/ethernet/intel/ice/ice_dpll.c | 2117 ++++++++++ drivers/net/ethernet/intel/ice/ice_dpll.h | 113 + drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 6 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 228 +- drivers/net/ethernet/intel/ice/ice_ethtool.h | 8 + drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 24 +- drivers/net/ethernet/intel/ice/ice_flow.c | 5 +- drivers/net/ethernet/intel/ice/ice_flow.h | 3 - drivers/net/ethernet/intel/ice/ice_gnss.c | 3 + drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 53 +- drivers/net/ethernet/intel/ice/ice_lag.c | 135 +- drivers/net/ethernet/intel/ice/ice_lag.h | 2 + drivers/net/ethernet/intel/ice/ice_lib.c | 50 +- drivers/net/ethernet/intel/ice/ice_main.c | 94 +- drivers/net/ethernet/intel/ice/ice_ptp.c | 713 +++- drivers/net/ethernet/intel/ice/ice_ptp.h | 46 +- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 812 +++- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 97 +- drivers/net/ethernet/intel/ice/ice_sched.c | 56 +- drivers/net/ethernet/intel/ice/ice_sched.h | 6 +- 
drivers/net/ethernet/intel/ice/ice_sriov.c | 314 +- drivers/net/ethernet/intel/ice/ice_sriov.h | 17 +- drivers/net/ethernet/intel/ice/ice_switch.c | 63 +- drivers/net/ethernet/intel/ice/ice_txrx.c | 3 - drivers/net/ethernet/intel/ice/ice_txrx.h | 1 - drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_type.h | 29 +- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 2 + drivers/net/ethernet/intel/ice/ice_vf_lib.h | 9 +- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 88 +- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 29 +- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 16 +- drivers/net/ethernet/intel/ice/ice_xsk.c | 22 +- drivers/net/ethernet/intel/idpf/Makefile | 18 + drivers/net/ethernet/intel/idpf/idpf.h | 968 +++++ drivers/net/ethernet/intel/idpf/idpf_controlq.c | 621 +++ drivers/net/ethernet/intel/idpf/idpf_controlq.h | 130 + .../net/ethernet/intel/idpf/idpf_controlq_api.h | 169 + .../net/ethernet/intel/idpf/idpf_controlq_setup.c | 171 + drivers/net/ethernet/intel/idpf/idpf_dev.c | 165 + drivers/net/ethernet/intel/idpf/idpf_devids.h | 10 + drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 1369 +++++++ drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h | 124 + drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h | 293 ++ drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h | 128 + drivers/net/ethernet/intel/idpf/idpf_lib.c | 2381 +++++++++++ drivers/net/ethernet/intel/idpf/idpf_main.c | 279 ++ drivers/net/ethernet/intel/idpf/idpf_mem.h | 20 + .../net/ethernet/intel/idpf/idpf_singleq_txrx.c | 1182 ++++++ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 4294 ++++++++++++++++++++ drivers/net/ethernet/intel/idpf/idpf_txrx.h | 1023 +++++ drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 163 + drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 3798 +++++++++++++++++ drivers/net/ethernet/intel/idpf/virtchnl2.h | 1273 ++++++ .../net/ethernet/intel/idpf/virtchnl2_lan_desc.h | 451 ++ drivers/net/ethernet/intel/igb/e1000_i210.c | 4 +- drivers/net/ethernet/intel/igb/e1000_nvm.c | 4 +- drivers/net/ethernet/intel/igb/e1000_phy.c | 4 +- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4 +- drivers/net/ethernet/intel/igb/igb_main.c | 55 +- drivers/net/ethernet/intel/igbvf/netdev.c | 30 +- drivers/net/ethernet/intel/igc/igc_ethtool.c | 5 +- drivers/net/ethernet/intel/igc/igc_i225.c | 1 + drivers/net/ethernet/intel/igc/igc_main.c | 2 +- drivers/net/ethernet/intel/igc/igc_phy.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 36 +- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 61 +- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 145 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 44 +- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 34 +- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 105 +- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 28 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 43 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 44 +- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 149 +- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 2 +- drivers/net/ethernet/korina.c | 6 +- drivers/net/ethernet/lantiq_etop.c | 6 +- drivers/net/ethernet/lantiq_xrx200.c | 6 +- drivers/net/ethernet/litex/litex_liteeth.c | 6 +- drivers/net/ethernet/marvell/mv643xx_eth.c | 11 +- drivers/net/ethernet/marvell/mvmdio.c | 59 +- drivers/net/ethernet/marvell/mvneta.c | 8 +- drivers/net/ethernet/marvell/mvneta_bm.c 
| 6 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 12 +- .../net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 168 +- .../net/ethernet/marvell/octeon_ep/octep_config.h | 22 +- .../ethernet/marvell/octeon_ep/octep_ctrl_net.c | 24 +- .../ethernet/marvell/octeon_ep/octep_ctrl_net.h | 18 + .../net/ethernet/marvell/octeon_ep/octep_main.c | 229 +- .../net/ethernet/marvell/octeon_ep/octep_main.h | 13 +- .../marvell/octeon_ep/octep_regs_cn9k_pf.h | 4 + drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 3 + drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 4 + drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 8 +- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 11 +- drivers/net/ethernet/marvell/octeontx2/af/npc.h | 8 + drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 86 +- .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 7 +- .../ethernet/marvell/octeontx2/af/rvu_debugfs.c | 53 + .../ethernet/marvell/octeontx2/af/rvu_devlink.c | 464 +-- .../net/ethernet/marvell/octeontx2/af/rvu_npc.c | 17 +- .../net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 62 + .../ethernet/marvell/octeontx2/nic/otx2_common.c | 16 +- .../ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1 - .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3 +- .../net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 31 +- .../net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 58 + .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 7 +- drivers/net/ethernet/marvell/pxa168_eth.c | 5 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 +- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- drivers/net/ethernet/mediatek/mtk_ppe.c | 4 +- drivers/net/ethernet/mediatek/mtk_ppe.h | 19 +- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 12 +- drivers/net/ethernet/mediatek/mtk_wed.c | 1432 +++++-- drivers/net/ethernet/mediatek/mtk_wed.h | 57 + drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 400 +- drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 150 +- drivers/net/ethernet/mediatek/mtk_wed_regs.h | 365 +- drivers/net/ethernet/mediatek/mtk_wed_wo.h | 3 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx4/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 8 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3 + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 70 + drivers/net/ethernet/mellanox/mlx5/core/dev.c | 122 +- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 11 - .../ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 49 +- .../mellanox/mlx5/core/diag/reporter_vnic.c | 118 +- .../mellanox/mlx5/core/diag/reporter_vnic.h | 6 +- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 432 ++ drivers/net/ethernet/mellanox/mlx5/core/en.h | 13 +- .../net/ethernet/mellanox/mlx5/core/en/devlink.c | 8 + drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1 - .../net/ethernet/mellanox/mlx5/core/en/health.c | 187 +- .../net/ethernet/mellanox/mlx5/core/en/health.h | 14 +- .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 422 +- .../ethernet/mellanox/mlx5/core/en/reporter_tx.c | 346 +- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c | 32 +- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h | 9 +- drivers/net/ethernet/mellanox/mlx5/core/en/rss.c | 144 +- drivers/net/ethernet/mellanox/mlx5/core/en/rss.h | 20 +- .../net/ethernet/mellanox/mlx5/core/en/rx_res.c | 105 +- .../net/ethernet/mellanox/mlx5/core/en/rx_res.h | 12 +- .../net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 20 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 26 +- 
.../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 47 +- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 393 +- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 3 +- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 97 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 32 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 16 +- .../net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 96 +- drivers/net/ethernet/mellanox/mlx5/core/events.c | 5 - drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10 +- drivers/net/ethernet/mellanox/mlx5/core/health.c | 129 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 24 +- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 47 +- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 1 + .../net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 27 +- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 10 +- .../net/ethernet/mellanox/mlx5/core/lib/devcom.c | 25 + .../net/ethernet/mellanox/mlx5/core/lib/devcom.h | 6 + drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 1 - .../mellanox/mlx5/core/lib/ipsec_fs_roce.c | 542 ++- .../mellanox/mlx5/core/lib/ipsec_fs_roce.h | 14 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 27 +- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 36 +- drivers/net/ethernet/mellanox/mlx5/core/port.c | 3 +- .../net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 101 +- .../net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 6 + .../ethernet/mellanox/mlx5/core/sf/dev/driver.c | 26 + .../net/ethernet/mellanox/mlx5/core/sf/devlink.c | 242 +- .../ethernet/mellanox/mlx5/core/sf/vhca_event.c | 69 +- .../ethernet/mellanox/mlx5/core/sf/vhca_event.h | 3 + .../mellanox/mlx5/core/steering/dr_action.c | 36 +- .../mellanox/mlx5/core/steering/dr_types.h | 5 +- .../ethernet/mellanox/mlx5/core/steering/fs_dr.c | 9 +- .../ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 6 +- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 43 +- drivers/net/ethernet/mellanox/mlxsw/core.c | 178 +- drivers/net/ethernet/mellanox/mlxsw/core.h | 6 +- .../ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 70 +- .../ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 15 +- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 2 +- .../ethernet/mellanox/mlxsw/core_linecard_dev.c | 9 +- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 3 +- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/pci.c | 37 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 20 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 95 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3 +- .../ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 20 +- .../mellanox/mlxsw/spectrum_acl_flex_keys.c | 30 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 69 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c | 20 +- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 2 +- drivers/net/ethernet/micrel/ks8842.c | 5 +- drivers/net/ethernet/micrel/ks8851_par.c | 6 +- drivers/net/ethernet/microchip/lan743x_ethtool.c | 3 +- drivers/net/ethernet/microchip/lan743x_main.c | 51 +- drivers/net/ethernet/microchip/lan743x_main.h | 8 + 
drivers/net/ethernet/microchip/lan743x_ptp.c | 9 + .../net/ethernet/microchip/lan966x/lan966x_lag.c | 9 +- .../net/ethernet/microchip/lan966x/lan966x_main.c | 7 +- .../net/ethernet/microchip/lan966x/lan966x_port.c | 5 +- .../net/ethernet/microchip/sparx5/sparx5_ethtool.c | 3 +- .../net/ethernet/microchip/sparx5/sparx5_main.c | 7 +- .../net/ethernet/microchip/sparx5/sparx5_main.h | 1 + .../net/ethernet/microchip/sparx5/sparx5_packet.c | 2 + .../net/ethernet/microchip/vcap/vcap_api_debugfs.c | 2 +- drivers/net/ethernet/microsoft/mana/mana_en.c | 5 +- drivers/net/ethernet/moxa/moxart_ether.c | 6 +- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 6 +- drivers/net/ethernet/natsemi/jazzsonic.c | 6 +- drivers/net/ethernet/natsemi/macsonic.c | 6 +- drivers/net/ethernet/natsemi/xtsonic.c | 6 +- drivers/net/ethernet/netronome/nfp/crypto/ipsec.c | 45 +- .../net/ethernet/netronome/nfp/flower/conntrack.c | 46 +- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 2 +- drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 2 +- drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 2 +- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 2 +- .../net/ethernet/netronome/nfp/nfp_net_common.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 2 +- .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 6 +- .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 2 +- .../ethernet/netronome/nfp/nfpcore/nfp_resource.c | 2 +- drivers/net/ethernet/ni/nixge.c | 11 +- drivers/net/ethernet/nxp/lpc_eth.c | 6 +- .../net/ethernet/pensando/ionic/ionic_bus_pci.c | 4 + drivers/net/ethernet/pensando/ionic/ionic_dev.c | 1 + drivers/net/ethernet/pensando/ionic/ionic_dev.h | 4 +- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 15 + drivers/net/ethernet/pensando/ionic/ionic_main.c | 26 +- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 77 +- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 - drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 +- drivers/net/ethernet/qlogic/qed/qed_devlink.c | 6 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 46 +- drivers/net/ethernet/qualcomm/emac/emac.c | 6 +- drivers/net/ethernet/renesas/Kconfig | 9 +- drivers/net/ethernet/renesas/Makefile | 4 +- drivers/net/ethernet/renesas/ravb_main.c | 6 +- drivers/net/ethernet/renesas/rswitch.c | 55 +- drivers/net/ethernet/renesas/rswitch.h | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 6 +- .../net/ethernet/samsung/sxgbe/sxgbe_platform.c | 6 +- drivers/net/ethernet/seeq/sgiseeq.c | 6 +- drivers/net/ethernet/sfc/efx_channels.c | 2 +- drivers/net/ethernet/sfc/mae.c | 62 +- drivers/net/ethernet/sfc/mcdi.c | 3 +- drivers/net/ethernet/sfc/ptp.c | 27 +- drivers/net/ethernet/sfc/siena/efx_channels.c | 2 +- drivers/net/ethernet/sfc/tc.c | 337 +- drivers/net/ethernet/sfc/tc.h | 8 + drivers/net/ethernet/sfc/tc_conntrack.c | 91 +- drivers/net/ethernet/sgi/ioc3-eth.c | 6 +- drivers/net/ethernet/sgi/meth.c | 6 +- drivers/net/ethernet/smsc/smc91x.c | 6 +- drivers/net/ethernet/smsc/smc91x.h | 19 - drivers/net/ethernet/smsc/smsc911x.c | 6 +- drivers/net/ethernet/socionext/netsec.c | 8 +- drivers/net/ethernet/socionext/sni_ave.c | 6 +- drivers/net/ethernet/stmicro/stmmac/Kconfig | 11 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + drivers/net/ethernet/stmicro/stmmac/common.h | 59 +- .../net/ethernet/stmicro/stmmac/dwmac-anarion.c | 10 +- .../ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 15 +- .../net/ethernet/stmicro/stmmac/dwmac-generic.c | 15 +- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 13 +- .../net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 33 +- 
.../net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 34 +- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 1 - .../net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 27 +- .../net/ethernet/stmicro/stmmac/dwmac-loongson1.c | 209 + .../net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 19 +- .../net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 6 +- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 25 +- .../net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 53 +- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 14 +- .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 16 +- .../net/ethernet/stmicro/stmmac/dwmac-starfive.c | 10 +- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 14 +- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 130 +- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 21 +- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 23 +- drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 10 +- .../net/ethernet/stmicro/stmmac/dwmac-visconti.c | 18 +- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 15 +- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 15 +- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 3 + .../net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 58 +- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 15 +- .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 131 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 196 +- .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 70 +- .../net/ethernet/stmicro/stmmac/stmmac_platform.h | 5 - drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 34 +- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2 +- drivers/net/ethernet/sun/niu.c | 5 +- drivers/net/ethernet/sun/sunbmac.c | 6 +- drivers/net/ethernet/sun/sunqe.c | 6 +- drivers/net/ethernet/sunplus/spl2sw_driver.c | 6 +- drivers/net/ethernet/ti/Kconfig | 9 +- drivers/net/ethernet/ti/Makefile | 1 - drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4 +- drivers/net/ethernet/ti/cpmac.c | 1251 ------ drivers/net/ethernet/ti/cpsw.c | 2 + drivers/net/ethernet/ti/cpsw_new.c | 3 + drivers/net/ethernet/ti/cpsw_priv.c | 2 +- drivers/net/ethernet/ti/davinci_emac.c | 40 +- drivers/net/ethernet/ti/davinci_mdio.c | 6 +- drivers/net/ethernet/ti/icssg/icssg_config.c | 14 + drivers/net/ethernet/ti/icssg/icssg_prueth.c | 49 +- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 2 + drivers/net/ethernet/ti/netcp_core.c | 5 +- drivers/net/ethernet/ti/netcp_ethss.c | 4 +- drivers/net/ethernet/toshiba/tc35815.c | 10 +- drivers/net/ethernet/tundra/tsi108_eth.c | 6 +- drivers/net/ethernet/via/via-rhine.c | 6 +- drivers/net/ethernet/via/via-velocity.c | 6 +- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 169 + drivers/net/ethernet/wangxun/libwx/wx_ethtool.h | 8 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 191 + drivers/net/ethernet/wangxun/libwx/wx_hw.h | 9 + drivers/net/ethernet/wangxun/libwx/wx_lib.c | 20 +- drivers/net/ethernet/wangxun/libwx/wx_type.h | 82 + drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 5 + drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c | 2 + drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 7 +- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 119 +- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3 - drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 5 + drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 110 +- drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 1 - drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 10 +- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 56 +- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 6 - 
drivers/net/ethernet/wiznet/w5100-spi.c | 12 +- drivers/net/ethernet/wiznet/w5100.c | 10 +- drivers/net/ethernet/wiznet/w5300.c | 5 +- drivers/net/ethernet/xilinx/ll_temac_main.c | 5 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6 +- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8 +- drivers/net/ethernet/xscale/ixp4xx_eth.c | 77 +- drivers/net/fjes/fjes_main.c | 2 +- drivers/net/geneve.c | 205 +- drivers/net/gtp.c | 10 +- drivers/net/hamradio/Kconfig | 15 +- drivers/net/hamradio/baycom_epp.c | 4 +- drivers/net/hyperv/netvsc.c | 23 +- drivers/net/hyperv/netvsc_drv.c | 82 +- drivers/net/ifb.c | 1 + drivers/net/ipa/ipa_interrupt.c | 2 +- drivers/net/ipa/ipa_power.c | 2 +- drivers/net/macvtap.c | 1 + drivers/net/mctp/Kconfig | 9 + drivers/net/mctp/Makefile | 1 + drivers/net/mctp/mctp-i3c.c | 755 ++++ drivers/net/mdio/acpi_mdio.c | 1 + drivers/net/mdio/fwnode_mdio.c | 1 + drivers/net/mdio/mdio-aspeed.c | 7 +- drivers/net/mdio/mdio-bcm-iproc.c | 6 +- drivers/net/mdio/mdio-bcm-unimac.c | 6 +- drivers/net/mdio/mdio-bitbang.c | 1 + drivers/net/mdio/mdio-gpio.c | 6 +- drivers/net/mdio/mdio-hisi-femac.c | 6 +- drivers/net/mdio/mdio-ipq4019.c | 6 +- drivers/net/mdio/mdio-ipq8064.c | 7 +- drivers/net/mdio/mdio-moxart.c | 6 +- drivers/net/mdio/mdio-mscc-miim.c | 6 +- drivers/net/mdio/mdio-mux-bcm-iproc.c | 6 +- drivers/net/mdio/mdio-mux-bcm6368.c | 6 +- drivers/net/mdio/mdio-mux-gpio.c | 5 +- drivers/net/mdio/mdio-mux-meson-g12a.c | 6 +- drivers/net/mdio/mdio-mux-meson-gxl.c | 6 +- drivers/net/mdio/mdio-mux-mmioreg.c | 6 +- drivers/net/mdio/mdio-mux-multiplexer.c | 6 +- drivers/net/mdio/mdio-octeon.c | 5 +- drivers/net/mdio/mdio-sun4i.c | 6 +- drivers/net/mdio/mdio-xgene.c | 27 +- drivers/net/mdio/of_mdio.c | 1 + drivers/net/netconsole.c | 155 +- drivers/net/netdevsim/bus.c | 12 + drivers/net/netdevsim/dev.c | 8 +- drivers/net/netdevsim/health.c | 118 +- drivers/net/netdevsim/netdev.c | 1 + drivers/net/netkit.c | 960 +++++ drivers/net/pcs/pcs-xpcs.c | 29 + drivers/net/pcs/pcs-xpcs.h | 2 + drivers/net/phy/Kconfig | 4 +- drivers/net/phy/amd.c | 33 +- drivers/net/phy/at803x.c | 6 +- drivers/net/phy/ax88796b.c | 2 +- drivers/net/phy/bcm-phy-ptp.c | 1 + drivers/net/phy/bcm87xx.c | 1 + drivers/net/phy/broadcom.c | 154 +- drivers/net/phy/dp83867.c | 137 + drivers/net/phy/mediatek-ge-soc.c | 147 +- drivers/net/phy/micrel.c | 28 +- drivers/net/phy/nxp-tja11xx.c | 6 +- drivers/net/phy/phy.c | 207 +- drivers/net/phy/phylink.c | 46 +- drivers/net/phy/realtek.c | 4 +- drivers/net/phy/sfp.c | 34 +- drivers/net/phy/smsc.c | 6 +- drivers/net/ppp/ppp_async.c | 4 + drivers/net/ppp/ppp_generic.c | 4 +- drivers/net/ppp/pppoe.c | 2 +- drivers/net/sungem_phy.c | 1 + drivers/net/tap.c | 1 + drivers/net/usb/ax88179_178a.c | 2 - drivers/net/usb/lan78xx.c | 2 +- drivers/net/usb/r8152.c | 67 +- drivers/net/usb/sr9800.c | 4 +- drivers/net/veth.c | 28 +- drivers/net/virtio_net.c | 39 +- drivers/net/vrf.c | 1 - drivers/net/vxlan/vxlan_core.c | 450 +- drivers/net/vxlan/vxlan_mdb.c | 190 +- drivers/net/vxlan/vxlan_private.h | 2 + drivers/net/wan/ixp4xx_hss.c | 4 +- drivers/net/wireguard/cookie.c | 2 +- drivers/net/wireguard/netlink.c | 2 +- drivers/net/wireguard/noise.c | 2 +- drivers/net/wireless/ath/ar5523/ar5523.c | 2 +- drivers/net/wireless/ath/ath10k/ce.h | 2 +- drivers/net/wireless/ath/ath10k/debug.c | 47 +- drivers/net/wireless/ath/ath10k/htt.h | 3 +- drivers/net/wireless/ath/ath10k/htt_rx.c | 1 - drivers/net/wireless/ath/ath10k/htt_tx.c | 16 +- drivers/net/wireless/ath/ath10k/mac.c | 26 +- 
drivers/net/wireless/ath/ath10k/pci.c | 2 +- drivers/net/wireless/ath/ath10k/spectral.c | 26 +- drivers/net/wireless/ath/ath11k/Makefile | 3 +- drivers/net/wireless/ath/ath11k/ahb.c | 10 +- drivers/net/wireless/ath/ath11k/core.c | 121 +- drivers/net/wireless/ath/ath11k/core.h | 27 +- drivers/net/wireless/ath/ath11k/debugfs.c | 33 +- drivers/net/wireless/ath/ath11k/debugfs.h | 12 +- drivers/net/wireless/ath/ath11k/debugfs_sta.c | 30 +- drivers/net/wireless/ath/ath11k/dp.c | 2 +- drivers/net/wireless/ath/ath11k/dp_rx.c | 31 +- drivers/net/wireless/ath/ath11k/dp_tx.c | 4 +- drivers/net/wireless/ath/ath11k/fw.c | 168 + drivers/net/wireless/ath/ath11k/fw.h | 27 + drivers/net/wireless/ath/ath11k/hal.c | 8 +- drivers/net/wireless/ath/ath11k/hal_rx.c | 31 +- drivers/net/wireless/ath/ath11k/hal_rx.h | 18 +- drivers/net/wireless/ath/ath11k/hal_tx.c | 2 +- drivers/net/wireless/ath/ath11k/hif.h | 54 +- drivers/net/wireless/ath/ath11k/htc.h | 12 - drivers/net/wireless/ath/ath11k/mac.c | 122 +- drivers/net/wireless/ath/ath11k/mhi.c | 19 +- drivers/net/wireless/ath/ath11k/pcic.c | 10 +- drivers/net/wireless/ath/ath11k/peer.c | 2 +- drivers/net/wireless/ath/ath11k/qmi.c | 54 +- drivers/net/wireless/ath/ath11k/reg.c | 11 + drivers/net/wireless/ath/ath11k/reg.h | 3 + drivers/net/wireless/ath/ath11k/spectral.c | 28 +- drivers/net/wireless/ath/ath11k/thermal.c | 22 +- drivers/net/wireless/ath/ath11k/thermal.h | 8 +- drivers/net/wireless/ath/ath11k/wmi.c | 51 +- drivers/net/wireless/ath/ath12k/core.c | 141 +- drivers/net/wireless/ath/ath12k/core.h | 31 +- drivers/net/wireless/ath/ath12k/debug.c | 2 +- drivers/net/wireless/ath/ath12k/dp_mon.c | 16 +- drivers/net/wireless/ath/ath12k/dp_rx.c | 20 +- drivers/net/wireless/ath/ath12k/dp_tx.c | 9 +- drivers/net/wireless/ath/ath12k/hal.c | 16 +- drivers/net/wireless/ath/ath12k/hal_rx.c | 2 - drivers/net/wireless/ath/ath12k/hif.h | 18 +- drivers/net/wireless/ath/ath12k/hw.c | 27 +- drivers/net/wireless/ath/ath12k/hw.h | 6 + drivers/net/wireless/ath/ath12k/mac.c | 315 +- drivers/net/wireless/ath/ath12k/mac.h | 2 + drivers/net/wireless/ath/ath12k/mhi.c | 1 + drivers/net/wireless/ath/ath12k/pci.c | 4 +- drivers/net/wireless/ath/ath12k/qmi.c | 12 + drivers/net/wireless/ath/ath12k/qmi.h | 1 + drivers/net/wireless/ath/ath12k/reg.c | 14 + drivers/net/wireless/ath/ath12k/reg.h | 6 + drivers/net/wireless/ath/ath12k/rx_desc.h | 91 +- drivers/net/wireless/ath/ath12k/wmi.c | 114 +- drivers/net/wireless/ath/ath12k/wmi.h | 28 + drivers/net/wireless/ath/ath5k/base.c | 6 +- drivers/net/wireless/ath/ath5k/led.c | 3 +- drivers/net/wireless/ath/ath5k/pci.c | 4 +- drivers/net/wireless/ath/ath6kl/cfg80211.c | 8 +- drivers/net/wireless/ath/ath6kl/init.c | 2 +- drivers/net/wireless/ath/ath6kl/main.c | 4 +- drivers/net/wireless/ath/ath6kl/txrx.c | 2 +- drivers/net/wireless/ath/ath9k/Kconfig | 4 +- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 11 +- drivers/net/wireless/ath/ath9k/hif_usb.c | 34 +- drivers/net/wireless/ath/ath9k/hif_usb.h | 2 +- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 7 +- drivers/net/wireless/ath/ath9k/xmit.c | 2 +- drivers/net/wireless/ath/carl9170/usb.c | 10 +- drivers/net/wireless/ath/dfs_pattern_detector.c | 21 +- drivers/net/wireless/ath/wcn36xx/dxe.c | 6 +- drivers/net/wireless/ath/wcn36xx/smd.c | 20 +- drivers/net/wireless/ath/wcn36xx/smd.h | 2 +- drivers/net/wireless/ath/wcn36xx/testmode.c | 2 +- drivers/net/wireless/ath/wil6210/cfg80211.c | 3 +- drivers/net/wireless/ath/wil6210/wmi.c | 2 - drivers/net/wireless/atmel/atmel.c | 72 - 
drivers/net/wireless/broadcom/b43/dma.c | 4 +- drivers/net/wireless/broadcom/b43/pio.c | 2 +- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 11 +- .../broadcom/brcm80211/brcmfmac/firmware.c | 6 +- .../broadcom/brcm80211/brcmfmac/firmware.h | 2 +- .../wireless/broadcom/brcm80211/brcmfmac/fweh.c | 6 +- .../broadcom/brcm80211/brcmfmac/fwil_types.h | 2 +- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 20 +- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 23 +- drivers/net/wireless/intel/ipw2x00/libipw.h | 2 +- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 +- drivers/net/wireless/intel/iwlegacy/common.c | 2 +- drivers/net/wireless/intel/iwlwifi/cfg/ax210.c | 2 +- drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 2 +- drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/commands.h | 33 +- drivers/net/wireless/intel/iwlwifi/dvm/dev.h | 14 +- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 6 +- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/rs.h | 12 +- drivers/net/wireless/intel/iwlwifi/dvm/tt.h | 9 +- drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 4 +- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 59 +- drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 8 +- .../net/wireless/intel/iwlwifi/fw/api/commands.h | 30 + drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 46 +- .../net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h | 37 +- drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 22 + .../net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 10 +- .../net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 68 +- .../net/wireless/intel/iwlwifi/fw/api/offload.h | 6 +- .../net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h | 4 +- drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 7 +- drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h | 7 +- drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 16 +- drivers/net/wireless/intel/iwlwifi/fw/api/stats.h | 153 +- .../net/wireless/intel/iwlwifi/fw/api/time-event.h | 78 +- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 4 +- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 203 +- drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 1 + drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 14 +- drivers/net/wireless/intel/iwlwifi/fw/file.h | 32 +- drivers/net/wireless/intel/iwlwifi/fw/img.h | 4 +- drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h | 3 +- drivers/net/wireless/intel/iwlwifi/fw/rs.c | 1 - drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 4 + drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 50 + drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 17 + drivers/net/wireless/intel/iwlwifi/iwl-config.h | 5 - .../wireless/intel/iwlwifi/iwl-context-info-gen3.h | 10 +- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 2 + drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h | 8 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 40 +- drivers/net/wireless/intel/iwlwifi/iwl-drv.h | 2 +- .../net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 5 +- .../net/wireless/intel/iwlwifi/iwl-eeprom-parse.h | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 13 +- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 88 +- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 19 +- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 16 +- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 13 +- drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 167 +- 
.../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 76 +- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 148 +- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 141 +- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 20 +- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 443 +- .../net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 166 +- drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 23 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 167 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 14 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 44 +- drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 72 +- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 5 + drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 23 +- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 157 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 339 +- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 27 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 12 +- drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 7 +- .../net/wireless/intel/iwlwifi/mvm/time-event.c | 170 +- .../net/wireless/intel/iwlwifi/mvm/time-event.h | 21 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 7 - drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 94 +- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 61 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 7 +- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 59 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 12 +- .../net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 5 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 45 +- drivers/net/wireless/intel/iwlwifi/queue/tx.h | 6 +- drivers/net/wireless/intersil/hostap/hostap.h | 1 - .../net/wireless/intersil/hostap/hostap_download.c | 3 +- .../net/wireless/intersil/hostap/hostap_ioctl.c | 228 -- drivers/net/wireless/intersil/hostap/hostap_main.c | 3 - drivers/net/wireless/intersil/hostap/hostap_wlan.h | 2 +- drivers/net/wireless/intersil/p54/p54.h | 2 +- drivers/net/wireless/legacy/ray_cs.c | 6 +- drivers/net/wireless/marvell/mwifiex/11h.c | 4 +- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 3 +- drivers/net/wireless/marvell/mwifiex/main.h | 4 +- drivers/net/wireless/marvell/mwifiex/pcie.c | 322 +- drivers/net/wireless/marvell/mwifiex/sdio.c | 10 - drivers/net/wireless/marvell/mwifiex/sdio.h | 4 - drivers/net/wireless/mediatek/mt76/Kconfig | 1 + drivers/net/wireless/mediatek/mt76/Makefile | 1 + drivers/net/wireless/mediatek/mt76/debugfs.c | 2 - drivers/net/wireless/mediatek/mt76/dma.c | 11 +- drivers/net/wireless/mediatek/mt76/eeprom.c | 7 +- drivers/net/wireless/mediatek/mt76/mac80211.c | 55 +- drivers/net/wireless/mediatek/mt76/mt76.h | 32 +- drivers/net/wireless/mediatek/mt76/mt7603/init.c | 8 + drivers/net/wireless/mediatek/mt76/mt7603/main.c | 4 +- drivers/net/wireless/mediatek/mt76/mt7615/init.c | 5 +- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 4 +- drivers/net/wireless/mediatek/mt76/mt76_connac.h | 6 + .../net/wireless/mediatek/mt76/mt76_connac3_mac.h | 16 +- .../net/wireless/mediatek/mt76/mt76_connac_mac.c | 4 +- .../net/wireless/mediatek/mt76/mt76_connac_mcu.c | 184 +- .../net/wireless/mediatek/mt76/mt76_connac_mcu.h | 60 +- .../net/wireless/mediatek/mt76/mt76x02_beacon.c | 8 +- drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 11 +- .../net/wireless/mediatek/mt76/mt76x02_usb_core.c | 13 +- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 4 +- 
drivers/net/wireless/mediatek/mt76/mt7915/init.c | 33 +- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 43 +- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 10 +- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 2 + drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 1 + drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 5 +- drivers/net/wireless/mediatek/mt76/mt7921/init.c | 76 +- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 98 +- drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 156 +- drivers/net/wireless/mediatek/mt76/mt7921/mcu.h | 13 + drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 19 +- drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 3 + .../net/wireless/mediatek/mt76/mt7921/sdio_mcu.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 12 +- drivers/net/wireless/mediatek/mt76/mt7925/Kconfig | 30 + drivers/net/wireless/mediatek/mt76/mt7925/Makefile | 9 + .../net/wireless/mediatek/mt76/mt7925/debugfs.c | 319 ++ drivers/net/wireless/mediatek/mt76/mt7925/init.c | 235 ++ drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 1452 +++++++ drivers/net/wireless/mediatek/mt76/mt7925/mac.h | 23 + drivers/net/wireless/mediatek/mt76/mt7925/main.c | 1454 +++++++ drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 3174 +++++++++++++++ drivers/net/wireless/mediatek/mt76/mt7925/mcu.h | 537 +++ drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 309 ++ drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 586 +++ .../net/wireless/mediatek/mt76/mt7925/pci_mac.c | 148 + .../net/wireless/mediatek/mt76/mt7925/pci_mcu.c | 53 + drivers/net/wireless/mediatek/mt76/mt7925/regs.h | 92 + drivers/net/wireless/mediatek/mt76/mt7925/usb.c | 332 ++ drivers/net/wireless/mediatek/mt76/mt792x.h | 37 + drivers/net/wireless/mediatek/mt76/mt792x_core.c | 24 +- drivers/net/wireless/mediatek/mt76/mt792x_dma.c | 49 +- drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 9 + drivers/net/wireless/mediatek/mt76/mt7996/init.c | 44 +- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 99 +- drivers/net/wireless/mediatek/mt76/mt7996/main.c | 47 +- drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 292 +- drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 26 + drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 2 +- drivers/net/wireless/mediatek/mt76/mt7996/pci.c | 8 +- drivers/net/wireless/mediatek/mt76/mt7996/regs.h | 8 + drivers/net/wireless/mediatek/mt76/tx.c | 108 +- drivers/net/wireless/mediatek/mt7601u/tx.c | 2 +- drivers/net/wireless/mediatek/mt7601u/usb.c | 1 + drivers/net/wireless/microchip/wilc1000/cfg80211.c | 4 +- drivers/net/wireless/microchip/wilc1000/netdev.c | 20 +- drivers/net/wireless/microchip/wilc1000/netdev.h | 2 + drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 4 +- drivers/net/wireless/quantenna/qtnfmac/commands.c | 5 +- drivers/net/wireless/quantenna/qtnfmac/core.c | 2 +- drivers/net/wireless/quantenna/qtnfmac/event.c | 4 +- drivers/net/wireless/ralink/rt2x00/rt2800.h | 18 + drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 316 +- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 3 + drivers/net/wireless/ralink/rt2x00/rt2x00.h | 6 + drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 5 +- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 11 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 14 + drivers/net/wireless/realtek/rtlwifi/base.c | 6 - drivers/net/wireless/realtek/rtlwifi/core.c | 18 +- drivers/net/wireless/realtek/rtlwifi/ps.c | 17 +- 
.../net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 5 - .../net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 8 +- .../net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 1 - .../net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 5 - .../net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 9 +- .../net/wireless/realtek/rtlwifi/rtl8192ce/trx.h | 1 - .../net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 34 +- .../net/wireless/realtek/rtlwifi/rtl8192cu/trx.h | 7 +- .../net/wireless/realtek/rtlwifi/rtl8192de/dm.c | 18 +- .../net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 4 - .../net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 8 +- .../net/wireless/realtek/rtlwifi/rtl8192de/trx.h | 1 - .../net/wireless/realtek/rtlwifi/rtl8192ee/dm.c | 7 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 16 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.h | 4 - .../net/wireless/realtek/rtlwifi/rtl8192se/fw.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192se/trx.h | 4 +- .../wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 16 +- .../net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 5 - .../net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 6 +- .../net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 8 +- .../net/wireless/realtek/rtlwifi/rtl8723ae/trx.h | 1 - .../net/wireless/realtek/rtlwifi/rtl8723be/dm.c | 7 +- .../net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 6 - .../net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 1 - .../net/wireless/realtek/rtlwifi/rtl8723be/trx.h | 1 - .../net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 6 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 5 - .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 5 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.h | 1 - drivers/net/wireless/realtek/rtlwifi/wifi.h | 15 +- drivers/net/wireless/realtek/rtw88/debug.h | 12 + drivers/net/wireless/realtek/rtw88/fw.c | 74 + drivers/net/wireless/realtek/rtw88/fw.h | 3 + drivers/net/wireless/realtek/rtw88/main.h | 10 +- drivers/net/wireless/realtek/rtw88/ps.c | 2 + drivers/net/wireless/realtek/rtw88/reg.h | 23 + drivers/net/wireless/realtek/rtw88/regd.c | 24 +- drivers/net/wireless/realtek/rtw88/regd.h | 2 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 67 + drivers/net/wireless/realtek/rtw88/rtw8821c.h | 1 + .../net/wireless/realtek/rtw88/rtw8821c_table.c | 1154 ++++-- .../net/wireless/realtek/rtw88/rtw8822c_table.c | 1239 +++--- drivers/net/wireless/realtek/rtw88/rtw8822cu.c | 4 +- drivers/net/wireless/realtek/rtw89/chan.c | 1652 +++++++- drivers/net/wireless/realtek/rtw89/chan.h | 34 + drivers/net/wireless/realtek/rtw89/coex.c | 29 +- drivers/net/wireless/realtek/rtw89/core.c | 467 ++- drivers/net/wireless/realtek/rtw89/core.h | 441 +- drivers/net/wireless/realtek/rtw89/debug.c | 286 +- drivers/net/wireless/realtek/rtw89/fw.c | 729 +++- drivers/net/wireless/realtek/rtw89/fw.h | 144 +- drivers/net/wireless/realtek/rtw89/mac.c | 311 +- drivers/net/wireless/realtek/rtw89/mac.h | 50 +- drivers/net/wireless/realtek/rtw89/mac80211.c | 22 +- drivers/net/wireless/realtek/rtw89/mac_be.c | 397 ++ drivers/net/wireless/realtek/rtw89/pci.c | 3 +- drivers/net/wireless/realtek/rtw89/phy.c | 515 ++- drivers/net/wireless/realtek/rtw89/phy.h | 136 +- drivers/net/wireless/realtek/rtw89/phy_be.c | 576 +++ 
drivers/net/wireless/realtek/rtw89/reg.h | 426 +- drivers/net/wireless/realtek/rtw89/regd.c | 4 +- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 29 +- .../net/wireless/realtek/rtw89/rtw8851b_table.c | 1337 +++--- .../net/wireless/realtek/rtw89/rtw8851b_table.h | 3 - drivers/net/wireless/realtek/rtw89/rtw8852a.c | 28 +- .../net/wireless/realtek/rtw89/rtw8852a_table.c | 2 + .../net/wireless/realtek/rtw89/rtw8852a_table.h | 1 - drivers/net/wireless/realtek/rtw89/rtw8852b.c | 37 +- .../net/wireless/realtek/rtw89/rtw8852b_table.c | 333 +- .../net/wireless/realtek/rtw89/rtw8852b_table.h | 3 - drivers/net/wireless/realtek/rtw89/rtw8852c.c | 57 +- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 107 +- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 3 + .../wireless/realtek/rtw89/rtw8852c_rfk_table.c | 42 +- .../net/wireless/realtek/rtw89/rtw8852c_table.c | 3782 +++++++++++++++-- .../net/wireless/realtek/rtw89/rtw8852c_table.h | 3 - drivers/net/wireless/realtek/rtw89/txrx.h | 271 ++ drivers/net/wireless/realtek/rtw89/wow.c | 4 +- drivers/net/wireless/silabs/wfx/data_tx.c | 54 +- drivers/net/wireless/silabs/wfx/data_tx.h | 21 +- drivers/net/wireless/silabs/wfx/hif_tx.c | 43 + drivers/net/wireless/silabs/wfx/hif_tx.h | 1 + drivers/net/wireless/silabs/wfx/main.c | 5 + drivers/net/wireless/silabs/wfx/queue.c | 38 +- drivers/net/wireless/silabs/wfx/queue.h | 1 + drivers/net/wireless/silabs/wfx/scan.c | 66 +- drivers/net/wireless/silabs/wfx/scan.h | 6 + drivers/net/wireless/silabs/wfx/sta.c | 83 +- drivers/net/wireless/silabs/wfx/sta.h | 1 - drivers/net/wireless/silabs/wfx/wfx.h | 8 +- drivers/net/wireless/st/cw1200/txrx.c | 4 +- drivers/net/wireless/ti/wl1251/main.c | 2 +- drivers/net/wireless/ti/wl1251/tx.c | 6 +- drivers/net/wireless/ti/wl12xx/main.c | 6 +- drivers/net/wireless/ti/wl18xx/main.c | 7 +- drivers/net/wireless/ti/wlcore/boot.c | 5 +- drivers/net/wireless/ti/wlcore/event.c | 2 +- drivers/net/wireless/ti/wlcore/main.c | 16 +- drivers/net/wireless/ti/wlcore/wlcore.h | 2 +- drivers/net/wireless/virtual/mac80211_hwsim.c | 56 +- drivers/net/wireless/virtual/mac80211_hwsim.h | 19 +- drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h | 2 +- drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 4 +- drivers/net/wwan/iosm/iosm_ipc_mux.h | 2 +- drivers/net/wwan/iosm/iosm_ipc_pm.h | 2 +- drivers/net/wwan/iosm/iosm_ipc_port.h | 2 +- drivers/net/wwan/iosm/iosm_ipc_trace.h | 2 +- drivers/net/wwan/rpmsg_wwan_ctrl.c | 2 +- drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 2 +- drivers/net/wwan/t7xx/t7xx_state_monitor.c | 3 +- drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 +- drivers/net/wwan/wwan_core.c | 9 +- drivers/net/xen-netback/interface.c | 8 +- drivers/net/xen-netback/netback.c | 84 +- 1105 files changed, 71094 insertions(+), 18021 deletions(-) delete mode 100644 drivers/net/appletalk/Kconfig delete mode 100644 drivers/net/appletalk/Makefile delete mode 100644 drivers/net/appletalk/ipddp.c delete mode 100644 drivers/net/appletalk/ipddp.h create mode 100644 drivers/net/dsa/microchip/ksz9477_acl.c create mode 100644 drivers/net/dsa/microchip/ksz9477_tc_flower.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_debug.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_devlink.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_devlink.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_io.h delete mode 100644 drivers/net/ethernet/intel/i40e/i40e_osdep.h delete 
mode 100644 drivers/net/ethernet/intel/iavf/iavf_client.c
 delete mode 100644 drivers/net/ethernet/intel/iavf/iavf_client.h
 create mode 100644 drivers/net/ethernet/intel/ice/ice_dpll.c
 create mode 100644 drivers/net/ethernet/intel/ice/ice_dpll.h
 create mode 100644 drivers/net/ethernet/intel/idpf/Makefile
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_dev.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_devids.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_ethtool.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_lib.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_main.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_mem.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_txrx.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_txrx.h
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
 create mode 100644 drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
 create mode 100644 drivers/net/ethernet/intel/idpf/virtchnl2.h
 create mode 100644 drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/dpll.c
 create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
 delete mode 100644 drivers/net/ethernet/ti/cpmac.c
 create mode 100644 drivers/net/mctp/mctp-i3c.c
 create mode 100644 drivers/net/netkit.c
 create mode 100644 drivers/net/wireless/ath/ath11k/fw.c
 create mode 100644 drivers/net/wireless/ath/ath11k/fw.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/Kconfig
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/Makefile
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/init.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/mac.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/mac.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/main.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/pci.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/regs.h
 create mode 100644 drivers/net/wireless/mediatek/mt76/mt7925/usb.c

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 44eeb5d61b..af0da4bb42 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -448,6 +448,15 @@ config NLMON
 	  diagnostics, etc. This is mostly intended for developers or support
 	  to debug netlink issues. If unsure, say N.
 
+config NETKIT
+	bool "BPF-programmable network device"
+	depends on BPF_SYSCALL
+	help
+	  The netkit device is a virtual networking device where BPF programs
+	  can be attached to the device(s) transmission routine in order to
+	  implement the driver's internal logic. The device can be configured
+	  to operate in L3 or L2 mode. If unsure, say N.
+
 config NET_VRF
 	tristate "Virtual Routing and Forwarding (Lite)"
 	depends on IP_MULTIPLE_TABLES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e26f98f897..7cab36f947 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO) += mdio.o
 obj-$(CONFIG_NET) += loopback.o
 obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_NETKIT) += netkit.o
 obj-y += phy/
 obj-y += pse-pd/
 obj-y += mdio/
@@ -45,7 +46,6 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
 # Networking Drivers
 #
 obj-$(CONFIG_ARCNET) += arcnet/
-obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_NET_DSA) += dsa/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 83214e2e70..dc50797a2e 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -247,12 +247,6 @@ static int __init net_olddevs_init(void)
 	for (num = 0; num < 8; ++num)
 		ethif_probe2(num);
 
-#ifdef CONFIG_COPS
-	cops_probe(0);
-	cops_probe(1);
-	cops_probe(2);
-#endif
-
 	return 0;
 }
 
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index ddd087c2c3..68e79b1272 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3449,5 +3449,6 @@ static void __exit amt_fini(void)
 module_exit(amt_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
 MODULE_AUTHOR("Taehee Yoo ");
 MODULE_ALIAS_RTNL_LINK("amt");
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
deleted file mode 100644
index b38ed52b82..0000000000
--- a/drivers/net/appletalk/Kconfig
+++ /dev/null
@@ -1,102 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Appletalk driver configuration
-#
-config ATALK
-	tristate "Appletalk protocol support"
-	select LLC
-	help
-	  AppleTalk is the protocol that Apple computers can use to communicate
-	  on a network. If your Linux box is connected to such a network and you
-	  wish to connect to it, say Y. You will need to use the netatalk package
-	  so that your Linux box can act as a print and file server for Macs as
-	  well as access AppleTalk printers. Check out
-	   on the WWW for details.
-	  EtherTalk is the name used for AppleTalk over Ethernet and the
-	  cheaper and slower LocalTalk is AppleTalk over a proprietary Apple
-	  network using serial links. EtherTalk and LocalTalk are fully
-	  supported by Linux.
-
-	  General information about how to connect Linux, Windows machines and
-	  Macs is on the WWW at . The
-	  NET3-4-HOWTO, available from
-	  , contains valuable
-	  information as well.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called appletalk. You almost certainly want to compile it as a
-	  module so you can restart your AppleTalk stack without rebooting
-	  your machine. I hear that the GNU boycott of Apple is over, so
-	  even politically correct people are allowed to say Y here.
-
-config DEV_APPLETALK
-	tristate "Appletalk interfaces support"
-	depends on ATALK
-	help
-	  AppleTalk is the protocol that Apple computers can use to communicate
-	  on a network. If your Linux box is connected to such a network, and wish
-	  to do IP over it, or you have a LocalTalk card and wish to use it to
-	  connect to the AppleTalk network, say Y.
-
-
-config COPS
-	tristate "COPS LocalTalk PC support"
-	depends on DEV_APPLETALK && ISA
-	depends on NETDEVICES
-	select NETDEV_LEGACY_INIT
-	help
-	  This allows you to use COPS AppleTalk cards to connect to LocalTalk
-	  networks. You also need version 1.3.3 or later of the netatalk
-	  package. This driver is experimental, which means that it may not
-	  work. This driver will only work if you choose "AppleTalk DDP"
-	  networking support, above.
-	  Please read the file
-	  .
-
-config COPS_DAYNA
-	bool "Dayna firmware support"
-	depends on COPS
-	help
-	  Support COPS compatible cards with Dayna style firmware (Dayna
-	  DL2000/ Daynatalk/PC (half length), COPS LT-95, Farallon PhoneNET PC
-	  III, Farallon PhoneNET PC II).
-
-config COPS_TANGENT
-	bool "Tangent firmware support"
-	depends on COPS
-	help
-	  Support COPS compatible cards with Tangent style firmware (Tangent
-	  ATB_II, Novell NL-1000, Daystar Digital LT-200.
-
-config IPDDP
-	tristate "Appletalk-IP driver support"
-	depends on DEV_APPLETALK && ATALK
-	help
-	  This allows IP networking for users who only have AppleTalk
-	  networking available. This feature is experimental. With this
-	  driver, you can encapsulate IP inside AppleTalk (e.g. if your Linux
-	  box is stuck on an AppleTalk only network) or decapsulate (e.g. if
-	  you want your Linux box to act as an Internet gateway for a zoo of
-	  AppleTalk connected Macs). Please see the file
-	   for more information.
-
-	  If you say Y here, the AppleTalk-IP support will be compiled into
-	  the kernel. In this case, you can either use encapsulation or
-	  decapsulation, but not both. With the following two questions, you
-	  decide which one you want.
-
-	  To compile the AppleTalk-IP support as a module, choose M here: the
-	  module will be called ipddp.
-	  In this case, you will be able to use both encapsulation and
-	  decapsulation simultaneously, by loading two copies of the module
-	  and specifying different values for the module option ipddp_mode.
-
-config IPDDP_ENCAP
-	bool "IP to Appletalk-IP Encapsulation support"
-	depends on IPDDP
-	help
-	  If you say Y here, the AppleTalk-IP code will be able to encapsulate
-	  IP packets inside AppleTalk frames; this is useful if your Linux box
-	  is stuck on an AppleTalk network (which hopefully contains a
-	  decapsulator somewhere). Please see
-	   for more information.
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
deleted file mode 100644
index 6db2943ce5..0000000000
--- a/drivers/net/appletalk/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for drivers/net/appletalk
-#
-
-obj-$(CONFIG_IPDDP) += ipddp.o
-obj-$(CONFIG_COPS) += cops.o
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
deleted file mode 100644
index d558535390..0000000000
--- a/drivers/net/appletalk/ipddp.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * ipddp.c: IP to Appletalk-IP Encapsulation driver for Linux
- * Appletalk-IP to IP Decapsulation driver for Linux
- *
- * Authors:
- *      - DDP-IP Encap by: Bradford W. Johnson
- *      - DDP-IP Decap by: Jay Schulist
- *
- * Derived from:
- *      - Almost all code already existed in net/appletalk/ddp.c I just
- *        moved/reorginized it into a driver file. Original IP-over-DDP code
- *        was done by Bradford W. Johnson
- *      - skeleton.c: A network driver outline for linux.
- *        Written 1993-94 by Donald Becker.
- * - dummy.c: A dummy net driver. By Nick Holloway. - * - MacGate: A user space Daemon for Appletalk-IP Decap for - * Linux by Jay Schulist - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ipddp.h" /* Our stuff */ - -static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson \n"; - -static struct ipddp_route *ipddp_route_list; -static DEFINE_SPINLOCK(ipddp_route_lock); - -#ifdef CONFIG_IPDDP_ENCAP -static int ipddp_mode = IPDDP_ENCAP; -#else -static int ipddp_mode = IPDDP_DECAP; -#endif - -/* Index to functions, as function prototypes. */ -static netdev_tx_t ipddp_xmit(struct sk_buff *skb, - struct net_device *dev); -static int ipddp_create(struct ipddp_route *new_rt); -static int ipddp_delete(struct ipddp_route *rt); -static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt); -static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd); - -static const struct net_device_ops ipddp_netdev_ops = { - .ndo_start_xmit = ipddp_xmit, - .ndo_siocdevprivate = ipddp_siocdevprivate, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static struct net_device * __init ipddp_init(void) -{ - static unsigned version_printed; - struct net_device *dev; - int err; - - dev = alloc_etherdev(0); - if (!dev) - return ERR_PTR(-ENOMEM); - - netif_keep_dst(dev); - strcpy(dev->name, "ipddp%d"); - - if (version_printed++ == 0) - printk(version); - - /* Initialize the device structure. */ - dev->netdev_ops = &ipddp_netdev_ops; - - dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */ - dev->mtu = 585; - dev->flags |= IFF_NOARP; - - /* - * The worst case header we will need is currently a - * ethernet header (14 bytes) and a ddp header (sizeof ddpehdr+1) - * We send over SNAP so that takes another 8 bytes. - */ - dev->hard_header_len = 14+8+sizeof(struct ddpehdr)+1; - - err = register_netdev(dev); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - - /* Let the user now what mode we are in */ - if(ipddp_mode == IPDDP_ENCAP) - printk("%s: Appletalk-IP Encap. mode by Bradford W. Johnson \n", - dev->name); - if(ipddp_mode == IPDDP_DECAP) - printk("%s: Appletalk-IP Decap. mode by Jay Schulist \n", - dev->name); - - return dev; -} - - -/* - * Transmit LLAP/ELAP frame using aarp_send_ddp. - */ -static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rtable *rtable = skb_rtable(skb); - __be32 paddr = 0; - struct ddpehdr *ddp; - struct ipddp_route *rt; - struct atalk_addr *our_addr; - - if (rtable->rt_gw_family == AF_INET) - paddr = rtable->rt_gw4; - - spin_lock(&ipddp_route_lock); - - /* - * Find appropriate route to use, based only on IP number. - */ - for(rt = ipddp_route_list; rt != NULL; rt = rt->next) - { - if(rt->ip == paddr) - break; - } - if(rt == NULL) { - spin_unlock(&ipddp_route_lock); - return NETDEV_TX_OK; - } - - our_addr = atalk_find_dev_addr(rt->dev); - - if(ipddp_mode == IPDDP_DECAP) - /* - * Pull off the excess room that should not be there. - * This is due to a hard-header problem. This is the - * quick fix for now though, till it breaks. 
- */ - skb_pull(skb, 35-(sizeof(struct ddpehdr)+1)); - - /* Create the Extended DDP header */ - ddp = (struct ddpehdr *)skb->data; - ddp->deh_len_hops = htons(skb->len + (1<<10)); - ddp->deh_sum = 0; - - /* - * For Localtalk we need aarp_send_ddp to strip the - * long DDP header and place a shot DDP header on it. - */ - if(rt->dev->type == ARPHRD_LOCALTLK) - { - ddp->deh_dnet = 0; /* FIXME more hops?? */ - ddp->deh_snet = 0; - } - else - { - ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */ - ddp->deh_snet = our_addr->s_net; - } - ddp->deh_dnode = rt->at.s_node; - ddp->deh_snode = our_addr->s_node; - ddp->deh_dport = 72; - ddp->deh_sport = 72; - - *((__u8 *)(ddp+1)) = 22; /* ddp type = IP */ - - skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */ - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - aarp_send_ddp(rt->dev, skb, &rt->at, NULL); - - spin_unlock(&ipddp_route_lock); - - return NETDEV_TX_OK; -} - -/* - * Create a routing entry. We first verify that the - * record does not already exist. If it does we return -EEXIST - */ -static int ipddp_create(struct ipddp_route *new_rt) -{ - struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); - - if (rt == NULL) - return -ENOMEM; - - rt->ip = new_rt->ip; - rt->at = new_rt->at; - rt->next = NULL; - if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) { - kfree(rt); - return -ENETUNREACH; - } - - spin_lock_bh(&ipddp_route_lock); - if (__ipddp_find_route(rt)) { - spin_unlock_bh(&ipddp_route_lock); - kfree(rt); - return -EEXIST; - } - - rt->next = ipddp_route_list; - ipddp_route_list = rt; - - spin_unlock_bh(&ipddp_route_lock); - - return 0; -} - -/* - * Delete a route, we only delete a FULL match. - * If route does not exist we return -ENOENT. - */ -static int ipddp_delete(struct ipddp_route *rt) -{ - struct ipddp_route **r = &ipddp_route_list; - struct ipddp_route *tmp; - - spin_lock_bh(&ipddp_route_lock); - while((tmp = *r) != NULL) - { - if(tmp->ip == rt->ip && - tmp->at.s_net == rt->at.s_net && - tmp->at.s_node == rt->at.s_node) - { - *r = tmp->next; - spin_unlock_bh(&ipddp_route_lock); - kfree(tmp); - return 0; - } - r = &tmp->next; - } - - spin_unlock_bh(&ipddp_route_lock); - return -ENOENT; -} - -/* - * Find a routing entry, we only return a FULL match - */ -static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt) -{ - struct ipddp_route *f; - - for(f = ipddp_route_list; f != NULL; f = f->next) - { - if(f->ip == rt->ip && - f->at.s_net == rt->at.s_net && - f->at.s_node == rt->at.s_node) - return f; - } - - return NULL; -} - -static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd) -{ - struct ipddp_route rcp, rcp2, *rp; - - if (in_compat_syscall()) - return -EOPNOTSUPP; - - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (copy_from_user(&rcp, data, sizeof(rcp))) - return -EFAULT; - - switch(cmd) - { - case SIOCADDIPDDPRT: - return ipddp_create(&rcp); - - case SIOCFINDIPDDPRT: - spin_lock_bh(&ipddp_route_lock); - rp = __ipddp_find_route(&rcp); - if (rp) { - memset(&rcp2, 0, sizeof(rcp2)); - rcp2.ip = rp->ip; - rcp2.at = rp->at; - rcp2.flags = rp->flags; - } - spin_unlock_bh(&ipddp_route_lock); - - if (rp) { - if (copy_to_user(data, &rcp2, - sizeof(struct ipddp_route))) - return -EFAULT; - return 0; - } else - return -ENOENT; - - case SIOCDELIPDDPRT: - return ipddp_delete(&rcp); - - default: - return -EINVAL; - } -} - -static struct net_device *dev_ipddp; - -MODULE_LICENSE("GPL"); -module_param(ipddp_mode, int, 0); - -static int __init 
ipddp_init_module(void) -{ - dev_ipddp = ipddp_init(); - return PTR_ERR_OR_ZERO(dev_ipddp); -} - -static void __exit ipddp_cleanup_module(void) -{ - struct ipddp_route *p; - - unregister_netdev(dev_ipddp); - free_netdev(dev_ipddp); - - while (ipddp_route_list) { - p = ipddp_route_list->next; - kfree(ipddp_route_list); - ipddp_route_list = p; - } -} - -module_init(ipddp_init_module); -module_exit(ipddp_cleanup_module); diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h deleted file mode 100644 index 9a8e45a469..0000000000 --- a/drivers/net/appletalk/ipddp.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ipddp.h: Header for IP-over-DDP driver for Linux. - */ - -#ifndef __LINUX_IPDDP_H -#define __LINUX_IPDDP_H - -#ifdef __KERNEL__ - -#define SIOCADDIPDDPRT (SIOCDEVPRIVATE) -#define SIOCDELIPDDPRT (SIOCDEVPRIVATE+1) -#define SIOCFINDIPDDPRT (SIOCDEVPRIVATE+2) - -struct ipddp_route -{ - struct net_device *dev; /* Carrier device */ - __be32 ip; /* IP address */ - struct atalk_addr at; /* Gateway appletalk address */ - int flags; - struct ipddp_route *next; -}; - -#define IPDDP_ENCAP 1 -#define IPDDP_DECAP 2 - -#endif /* __KERNEL__ */ -#endif /* __LINUX_IPDDP_H */ diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 683203f87a..31377bb1cc 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -306,8 +306,13 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!sock) return -ESHUTDOWN; - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, - IPPROTO_UDP, use_cache); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key, + sport, bareudp->port, key->tos, + use_cache ? + (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -315,9 +320,6 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, &rt->dst, BAREUDP_IPV4_HLEN + info->options_len, false); - sport = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, USHRT_MAX, - true); tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -369,17 +371,19 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!sock) return -ESHUTDOWN; - dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info, - IPPROTO_UDP, use_cache); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); + dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr, + key, sport, bareudp->port, key->tos, + use_cache ? 
+ (struct dst_cache *) &info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, false); - sport = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, USHRT_MAX, - true); prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -476,15 +480,21 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, struct ip_tunnel_info *info = skb_tunnel_info(skb); struct bareudp_dev *bareudp = netdev_priv(dev); bool use_cache; + __be16 sport; use_cache = ip_tunnel_dst_cache_usable(skb, info); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, - info, IPPROTO_UDP, use_cache); + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, + &info->key, sport, bareudp->port, + info->key.tos, + use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -495,9 +505,10 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, struct in6_addr saddr; struct socket *sock = rcu_dereference(bareudp->sock); - dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, - &saddr, info, IPPROTO_UDP, - use_cache); + dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, + 0, &saddr, &info->key, + sport, bareudp->port, info->key.tos, + use_cache ? &info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -507,9 +518,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, return -EINVAL; } - info->key.tp_src = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, - USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = bareudp->port; return 0; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index dc2c7b9796..7edf0fd58c 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -985,7 +985,8 @@ static int alb_upper_dev_walk(struct net_device *upper, if (netif_is_macvlan(upper) && !strict_match) { tags = bond_verify_device_path(bond->dev, upper, 0); if (IS_ERR_OR_NULL(tags)) - BUG(); + return -ENOMEM; + alb_send_lp_vid(slave, upper->dev_addr, tags[0].vlan_proto, tags[0].vlan_id); kfree(tags); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8e6cc0e133..6cf7f36470 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev) bond_for_each_slave(bond, slave, iter) val &= slave->dev->xdp_features; + val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY; + xdp_set_features_flag(bond_dev, val); } @@ -5934,9 +5936,6 @@ void bond_setup(struct net_device *bond_dev) if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) bond_dev->features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ - - if (bond_xdp_check(bond)) - bond_dev->xdp_features = NETDEV_XDP_ACT_MASK; } /* Destroy a bonding device. 
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 27cbe148f0..cfa74cf8bb 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -85,7 +85,7 @@ nla_put_failure: } /* Limit the max delay range to 300s */ -static struct netlink_range_validation delay_range = { +static const struct netlink_range_validation delay_range = { .max = 300000, }; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index f8cde9f9f5..eb410714af 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -89,6 +89,7 @@ config CAN_RX_OFFLOAD config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM + select CAN_RX_OFFLOAD help This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 4621266851..11f434d708 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -3,9 +3,10 @@ * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. Koch - * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde + * (C) 2008, 2009, 2010, 2011, 2023 by Marc Kleine-Budde */ +#include #include #include #include @@ -15,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -24,90 +26,115 @@ #include #include +#include -#define AT91_MB_MASK(i) ((1 << (i)) - 1) +#define AT91_MB_MASK(i) ((1 << (i)) - 1) /* Common registers */ enum at91_reg { - AT91_MR = 0x000, - AT91_IER = 0x004, - AT91_IDR = 0x008, - AT91_IMR = 0x00C, - AT91_SR = 0x010, - AT91_BR = 0x014, - AT91_TIM = 0x018, - AT91_TIMESTP = 0x01C, - AT91_ECR = 0x020, - AT91_TCR = 0x024, - AT91_ACR = 0x028, + AT91_MR = 0x000, + AT91_IER = 0x004, + AT91_IDR = 0x008, + AT91_IMR = 0x00C, + AT91_SR = 0x010, + AT91_BR = 0x014, + AT91_TIM = 0x018, + AT91_TIMESTP = 0x01C, + AT91_ECR = 0x020, + AT91_TCR = 0x024, + AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ -#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) -#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) -#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) -#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) -#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) -#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) -#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) -#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) +#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) +#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) +#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) +#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) +#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) +#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) +#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) +#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) /* Register bits */ -#define AT91_MR_CANEN BIT(0) -#define AT91_MR_LPM BIT(1) -#define AT91_MR_ABM BIT(2) -#define AT91_MR_OVL BIT(3) -#define AT91_MR_TEOF BIT(4) -#define AT91_MR_TTM BIT(5) -#define AT91_MR_TIMFRZ BIT(6) -#define AT91_MR_DRPT BIT(7) - -#define AT91_SR_RBSY BIT(29) - -#define AT91_MMR_PRIO_SHIFT (16) - -#define AT91_MID_MIDE BIT(29) - -#define AT91_MSR_MRTR BIT(20) -#define AT91_MSR_MABT BIT(22) -#define AT91_MSR_MRDY BIT(23) -#define AT91_MSR_MMI BIT(24) - -#define AT91_MCR_MRTR BIT(20) -#define AT91_MCR_MTCR BIT(23) +#define 
AT91_MR_CANEN BIT(0) +#define AT91_MR_LPM BIT(1) +#define AT91_MR_ABM BIT(2) +#define AT91_MR_OVL BIT(3) +#define AT91_MR_TEOF BIT(4) +#define AT91_MR_TTM BIT(5) +#define AT91_MR_TIMFRZ BIT(6) +#define AT91_MR_DRPT BIT(7) + +#define AT91_SR_RBSY BIT(29) +#define AT91_SR_TBSY BIT(30) +#define AT91_SR_OVLSY BIT(31) + +#define AT91_BR_PHASE2_MASK GENMASK(2, 0) +#define AT91_BR_PHASE1_MASK GENMASK(6, 4) +#define AT91_BR_PROPAG_MASK GENMASK(10, 8) +#define AT91_BR_SJW_MASK GENMASK(13, 12) +#define AT91_BR_BRP_MASK GENMASK(22, 16) +#define AT91_BR_SMP BIT(24) + +#define AT91_TIM_TIMER_MASK GENMASK(15, 0) + +#define AT91_ECR_REC_MASK GENMASK(8, 0) +#define AT91_ECR_TEC_MASK GENMASK(23, 16) + +#define AT91_TCR_TIMRST BIT(31) + +#define AT91_MMR_MTIMEMARK_MASK GENMASK(15, 0) +#define AT91_MMR_PRIOR_MASK GENMASK(19, 16) +#define AT91_MMR_MOT_MASK GENMASK(26, 24) + +#define AT91_MID_MIDVB_MASK GENMASK(17, 0) +#define AT91_MID_MIDVA_MASK GENMASK(28, 18) +#define AT91_MID_MIDE BIT(29) + +#define AT91_MSR_MTIMESTAMP_MASK GENMASK(15, 0) +#define AT91_MSR_MDLC_MASK GENMASK(19, 16) +#define AT91_MSR_MRTR BIT(20) +#define AT91_MSR_MABT BIT(22) +#define AT91_MSR_MRDY BIT(23) +#define AT91_MSR_MMI BIT(24) + +#define AT91_MCR_MDLC_MASK GENMASK(19, 16) +#define AT91_MCR_MRTR BIT(20) +#define AT91_MCR_MACR BIT(22) +#define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { - AT91_MB_MODE_DISABLED = 0, - AT91_MB_MODE_RX = 1, - AT91_MB_MODE_RX_OVRWR = 2, - AT91_MB_MODE_TX = 3, - AT91_MB_MODE_CONSUMER = 4, - AT91_MB_MODE_PRODUCER = 5, + AT91_MB_MODE_DISABLED = 0, + AT91_MB_MODE_RX = 1, + AT91_MB_MODE_RX_OVRWR = 2, + AT91_MB_MODE_TX = 3, + AT91_MB_MODE_CONSUMER = 4, + AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ -#define AT91_IRQ_ERRA BIT(16) -#define AT91_IRQ_WARN BIT(17) -#define AT91_IRQ_ERRP BIT(18) -#define AT91_IRQ_BOFF BIT(19) -#define AT91_IRQ_SLEEP BIT(20) -#define AT91_IRQ_WAKEUP BIT(21) -#define AT91_IRQ_TOVF BIT(22) -#define AT91_IRQ_TSTP BIT(23) -#define AT91_IRQ_CERR BIT(24) -#define AT91_IRQ_SERR BIT(25) -#define AT91_IRQ_AERR BIT(26) -#define AT91_IRQ_FERR BIT(27) -#define AT91_IRQ_BERR BIT(28) - -#define AT91_IRQ_ERR_ALL (0x1fff0000) -#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ - AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) -#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ - AT91_IRQ_ERRP | AT91_IRQ_BOFF) - -#define AT91_IRQ_ALL (0x1fffffff) +#define AT91_IRQ_ERRA BIT(16) +#define AT91_IRQ_WARN BIT(17) +#define AT91_IRQ_ERRP BIT(18) +#define AT91_IRQ_BOFF BIT(19) +#define AT91_IRQ_SLEEP BIT(20) +#define AT91_IRQ_WAKEUP BIT(21) +#define AT91_IRQ_TOVF BIT(22) +#define AT91_IRQ_TSTP BIT(23) +#define AT91_IRQ_CERR BIT(24) +#define AT91_IRQ_SERR BIT(25) +#define AT91_IRQ_AERR BIT(26) +#define AT91_IRQ_FERR BIT(27) +#define AT91_IRQ_BERR BIT(28) + +#define AT91_IRQ_ERR_ALL (0x1fff0000) +#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ + AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) +#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ + AT91_IRQ_ERRP | AT91_IRQ_BOFF) + +#define AT91_IRQ_ALL (0x1fffffff) enum at91_devtype { AT91_DEVTYPE_SAM9263, @@ -116,7 +143,6 @@ enum at91_devtype { struct at91_devtype_data { unsigned int rx_first; - unsigned int rx_split; unsigned int rx_last; unsigned int tx_shift; enum at91_devtype type; @@ -124,14 +150,13 @@ struct at91_devtype_data { struct at91_priv { struct can_priv can; /* must be the first member! 
*/ - struct napi_struct napi; + struct can_rx_offload offload; + struct phy *transceiver; void __iomem *reg_base; - u32 reg_sr; - unsigned int tx_next; - unsigned int tx_echo; - unsigned int rx_next; + unsigned int tx_head; + unsigned int tx_tail; struct at91_devtype_data devtype_data; struct clk *clk; @@ -140,9 +165,13 @@ struct at91_priv { canid_t mb0_id; }; +static inline struct at91_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct at91_priv, offload); +} + static const struct at91_devtype_data at91_at91sam9263_data = { .rx_first = 1, - .rx_split = 8, .rx_last = 11, .tx_shift = 2, .type = AT91_DEVTYPE_SAM9263, @@ -150,7 +179,6 @@ static const struct at91_devtype_data at91_at91sam9263_data = { static const struct at91_devtype_data at91_at91sam9x5_data = { .rx_first = 0, - .rx_split = 4, .rx_last = 5, .tx_shift = 1, .type = AT91_DEVTYPE_SAM9X5, @@ -187,27 +215,6 @@ static inline unsigned int get_mb_rx_last(const struct at91_priv *priv) return priv->devtype_data.rx_last; } -static inline unsigned int get_mb_rx_split(const struct at91_priv *priv) -{ - return priv->devtype_data.rx_split; -} - -static inline unsigned int get_mb_rx_num(const struct at91_priv *priv) -{ - return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1; -} - -static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv) -{ - return get_mb_rx_split(priv) - 1; -} - -static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv) -{ - return AT91_MB_MASK(get_mb_rx_split(priv)) & - ~AT91_MB_MASK(get_mb_rx_first(priv)); -} - static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv) { return priv->devtype_data.tx_shift; @@ -228,24 +235,24 @@ static inline unsigned int get_mb_tx_last(const struct at91_priv *priv) return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1; } -static inline unsigned int get_next_prio_shift(const struct at91_priv *priv) +static inline unsigned int get_head_prio_shift(const struct at91_priv *priv) { return get_mb_tx_shift(priv); } -static inline unsigned int get_next_prio_mask(const struct at91_priv *priv) +static inline unsigned int get_head_prio_mask(const struct at91_priv *priv) { return 0xf << get_mb_tx_shift(priv); } -static inline unsigned int get_next_mb_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mb_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_shift(priv)); } -static inline unsigned int get_next_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mask(const struct at91_priv *priv) { - return get_next_mb_mask(priv) | get_next_prio_mask(priv); + return get_head_mb_mask(priv) | get_head_prio_mask(priv); } static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv) @@ -260,19 +267,19 @@ static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv) ~AT91_MB_MASK(get_mb_tx_first(priv)); } -static inline unsigned int get_tx_next_mb(const struct at91_priv *priv) +static inline unsigned int get_tx_head_mb(const struct at91_priv *priv) { - return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv); + return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } -static inline unsigned int get_tx_next_prio(const struct at91_priv *priv) +static inline unsigned int get_tx_head_prio(const struct at91_priv *priv) { - return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf; + return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf; } -static inline unsigned int get_tx_echo_mb(const struct at91_priv 
*priv) +static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv) { - return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv); + return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) @@ -288,9 +295,12 @@ static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, static inline void set_mb_mode_prio(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode, - int prio) + u8 prio) { - at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); + const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) | + FIELD_PREP(AT91_MMR_PRIOR_MASK, prio); + + at91_write(priv, AT91_MMR(mb), reg_mmr); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, @@ -304,9 +314,10 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id) u32 reg_mid; if (can_id & CAN_EFF_FLAG) - reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) | + AT91_MID_MIDE; else - reg_mid = (can_id & CAN_SFF_MASK) << 18; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id); return reg_mid; } @@ -318,8 +329,8 @@ static void at91_setup_mailboxes(struct net_device *dev) u32 reg_mid; /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first - * mailbox is disabled. The next 11 mailboxes are used as a - * reception FIFO. The last mailbox is configured with + * mailbox is disabled. The next mailboxes are used as a + * reception FIFO. The last of the RX mailboxes is configured with * overwrite option. The overwrite flag indicates a FIFO * overflow. */ @@ -340,27 +351,30 @@ static void at91_setup_mailboxes(struct net_device *dev) at91_write(priv, AT91_MID(i), AT91_MID_MIDE); } - /* The last 4 mailboxes are used for transmitting. */ + /* The last mailboxes are used for transmitting. */ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); - /* Reset tx and rx helper pointers */ - priv->tx_next = priv->tx_echo = 0; - priv->rx_next = get_mb_rx_first(priv); + /* Reset tx helper pointers */ + priv->tx_head = priv->tx_tail = 0; } static int at91_set_bittiming(struct net_device *dev) { const struct at91_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; - u32 reg_br; + u32 reg_br = 0; + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + reg_br |= AT91_BR_SMP; - reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 << 24 : 0) | - ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | - ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | - ((bt->phase_seg2 - 1) << 0); + reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) | + FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) | + FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) | + FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) | + FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1); - netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); + netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); @@ -373,8 +387,8 @@ static int at91_get_berr_counter(const struct net_device *dev, const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); - bec->rxerr = reg_ecr & 0xff; - bec->txerr = reg_ecr >> 16; + bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr); + bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr); return 0; } @@ -403,9 +417,13 @@ static void at91_chip_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* Dummy read to clear latched line error interrupts on + * sam9x5 and newer SoCs. + */ + at91_read(priv, AT91_SR); + /* Enable interrupts */ - reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; - at91_write(priv, AT91_IDR, AT91_IRQ_ALL); + reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IER, reg_ier); } @@ -414,6 +432,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; + /* Abort any pending TX requests. However this doesn't seem to + * work in case of bus-off on sama5d3. + */ + at91_write(priv, AT91_ACR, get_irq_mb_tx(priv)); + /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); @@ -437,11 +460,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * - * We use the priv->tx_next as counter for the next transmission + * We use the priv->tx_head as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * - * priv->tx_next = (prio << get_next_prio_shift(priv)) | + * priv->tx_head = (prio << get_next_prio_shift(priv)) | * (mb - get_mb_tx_first(priv)); * */ @@ -455,8 +478,8 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; - mb = get_tx_next_mb(priv); - prio = get_tx_next_prio(priv); + mb = get_tx_head_mb(priv); + prio = get_tx_head_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); @@ -465,8 +488,12 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); - reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | - (cf->len << 16) | AT91_MCR_MTCR; + + reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) | + AT91_MCR_MTCR; + + if (cf->can_id & CAN_RTR_FLAG) + reg_mcr |= AT91_MCR_MRTR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); @@ -484,15 +511,15 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. 
This is the case if - * tx_next buffer prio and mailbox equals 0. + * tx_head buffer prio and mailbox equals 0. * * also stop the queue if next buffer is still in use * (== not ready) */ - priv->tx_next++; - if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & + priv->tx_head++; + if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) & AT91_MSR_MRDY) || - (priv->tx_next & get_next_mask(priv)) == 0) + (priv->tx_head & get_head_mask(priv)) == 0) netif_stop_queue(dev); /* Enable interrupt for this mailbox */ @@ -501,32 +528,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -/** - * at91_activate_rx_low - activate lower rx mailboxes - * @priv: a91 context - * - * Reenables the lower mailboxes for reception of new CAN messages - */ -static inline void at91_activate_rx_low(const struct at91_priv *priv) +static inline u32 at91_get_timestamp(const struct at91_priv *priv) { - u32 mask = get_mb_rx_low_mask(priv); - - at91_write(priv, AT91_TCR, mask); + return at91_read(priv, AT91_TIM); } -/** - * at91_activate_rx_mb - reactive single rx mailbox - * @priv: a91 context - * @mb: mailbox to reactivate - * - * Reenables given mailbox for reception of new CAN messages - */ -static inline void at91_activate_rx_mb(const struct at91_priv *priv, - unsigned int mb) +static inline struct sk_buff * +at91_alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf, u32 *timestamp) { - u32 mask = 1 << mb; + const struct at91_priv *priv = netdev_priv(dev); - at91_write(priv, AT91_TCR, mask); + *timestamp = at91_get_timestamp(priv); + + return alloc_can_err_skb(dev, cf); } /** @@ -537,45 +552,71 @@ static void at91_rx_overflow_err(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; + struct at91_priv *priv = netdev_priv(dev); struct can_frame *cf; + u32 timestamp; + int err; netdev_dbg(dev, "RX buffer overflow\n"); stats->rx_over_errors++; stats->rx_errors++; - skb = alloc_can_err_skb(dev, &cf); + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } /** - * at91_read_mb - read CAN msg from mailbox (lowlevel impl) - * @dev: net device + * at91_mailbox_read - read CAN msg from mailbox + * @offload: rx-offload * @mb: mailbox number to read from - * @cf: can frame where to store message + * @timestamp: pointer to 32 bit timestamp + * @drop: true indicated mailbox to mark as read and drop frame * - * Reads a CAN message from the given mailbox and stores data into - * given can frame. "mb" and "cf" must be valid. + * Reads a CAN message from the given mailbox if not empty. 
*/ -static void at91_read_mb(struct net_device *dev, unsigned int mb, - struct can_frame *cf) +static struct sk_buff *at91_mailbox_read(struct can_rx_offload *offload, + unsigned int mb, u32 *timestamp, + bool drop) { - const struct at91_priv *priv = netdev_priv(dev); + const struct at91_priv *priv = rx_offload_to_priv(offload); + struct can_frame *cf; + struct sk_buff *skb; u32 reg_msr, reg_mid; + reg_msr = at91_read(priv, AT91_MSR(mb)); + if (!(reg_msr & AT91_MSR_MRDY)) + return NULL; + + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; + } + reg_mid = at91_read(priv, AT91_MID(mb)); if (reg_mid & AT91_MID_MIDE) - cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) | + CAN_EFF_FLAG; else - cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid); - reg_msr = at91_read(priv, AT91_MSR(mb)); - cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf); + /* extend timestamp to full 32 bit */ + *timestamp = FIELD_GET(AT91_MSR_MTIMESTAMP_MASK, reg_msr) << 16; + + cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr)); if (reg_msr & AT91_MSR_MRTR) { cf->can_id |= CAN_RTR_FLAG; @@ -588,234 +629,21 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, at91_write(priv, AT91_MID(mb), AT91_MID_MIDE); if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI)) - at91_rx_overflow_err(dev); -} - -/** - * at91_read_msg - read CAN message from mailbox - * @dev: net device - * @mb: mail box to read from - * - * Reads a CAN message from given mailbox, and put into linux network - * RX queue, does all housekeeping chores (stats, ...) - */ -static void at91_read_msg(struct net_device *dev, unsigned int mb) -{ - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - - skb = alloc_can_skb(dev, &cf); - if (unlikely(!skb)) { - stats->rx_dropped++; - return; - } - - at91_read_mb(dev, mb, cf); - - stats->rx_packets++; - if (!(cf->can_id & CAN_RTR_FLAG)) - stats->rx_bytes += cf->len; - - netif_receive_skb(skb); -} - -/** - * at91_poll_rx - read multiple CAN messages from mailboxes - * @dev: net device - * @quota: max number of pkgs we're allowed to receive - * - * Theory of Operation: - * - * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last()) - * on the chip are reserved for RX. We split them into 2 groups. The - * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last(). - * - * Like it or not, but the chip always saves a received CAN message - * into the first free mailbox it finds (starting with the - * lowest). This makes it very difficult to read the messages in the - * right order from the chip. This is how we work around that problem: - * - * The first message goes into mb nr. 1 and issues an interrupt. All - * rx ints are disabled in the interrupt handler and a napi poll is - * scheduled. We read the mailbox, but do _not_ re-enable the mb (to - * receive another message). - * - * lower mbxs upper - * ____^______ __^__ - * / \ / \ - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * | |x|x|x|x|x|x|x|| | | | | - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail - * 0 1 2 3 4 5 6 7 8 9 0 1 / box - * ^ - * | - * \ - * unused, due to chip bug - * - * The variable priv->rx_next points to the next mailbox to read a - * message from. 
As long we're in the lower mailboxes we just read the - * mailbox but not re-enable it. - * - * With completion of the last of the lower mailboxes, we re-enable the - * whole first group, but continue to look for filled mailboxes in the - * upper mailboxes. Imagine the second group like overflow mailboxes, - * which takes CAN messages if the lower goup is full. While in the - * upper group we re-enable the mailbox right after reading it. Giving - * the chip more room to store messages. - * - * After finishing we look again in the lower group if we've still - * quota. - * - */ -static int at91_poll_rx(struct net_device *dev, int quota) -{ - struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - const unsigned long *addr = (unsigned long *)®_sr; - unsigned int mb; - int received = 0; - - if (priv->rx_next > get_mb_rx_low_last(priv) && - reg_sr & get_mb_rx_low_mask(priv)) - netdev_info(dev, - "order of incoming frames cannot be guaranteed\n"); - - again: - for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next); - mb < get_mb_tx_first(priv) && quota > 0; - reg_sr = at91_read(priv, AT91_SR), - mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) { - at91_read_msg(dev, mb); - - /* reactivate mailboxes */ - if (mb == get_mb_rx_low_last(priv)) - /* all lower mailboxed, if just finished it */ - at91_activate_rx_low(priv); - else if (mb > get_mb_rx_low_last(priv)) - /* only the mailbox we read */ - at91_activate_rx_mb(priv, mb); - - received++; - quota--; - } - - /* upper group completed, look again in lower */ - if (priv->rx_next > get_mb_rx_low_last(priv) && - mb > get_mb_rx_last(priv)) { - priv->rx_next = get_mb_rx_first(priv); - if (quota > 0) - goto again; - } - - return received; -} - -static void at91_poll_err_frame(struct net_device *dev, - struct can_frame *cf, u32 reg_sr) -{ - struct at91_priv *priv = netdev_priv(dev); - - /* CRC error */ - if (reg_sr & AT91_IRQ_CERR) { - netdev_dbg(dev, "CERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - } - - /* Stuffing Error */ - if (reg_sr & AT91_IRQ_SERR) { - netdev_dbg(dev, "SERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_STUFF; - } - - /* Acknowledgement Error */ - if (reg_sr & AT91_IRQ_AERR) { - netdev_dbg(dev, "AERR irq\n"); - dev->stats.tx_errors++; - cf->can_id |= CAN_ERR_ACK; - } - - /* Form error */ - if (reg_sr & AT91_IRQ_FERR) { - netdev_dbg(dev, "FERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_FORM; - } - - /* Bit Error */ - if (reg_sr & AT91_IRQ_BERR) { - netdev_dbg(dev, "BERR irq\n"); - dev->stats.tx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_BIT; - } -} - -static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) -{ - struct sk_buff *skb; - struct can_frame *cf; - - if (quota == 0) - return 0; - - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; - - at91_poll_err_frame(dev, cf, reg_sr); - - netif_receive_skb(skb); - - return 1; -} - -static int at91_poll(struct napi_struct *napi, int quota) -{ - struct net_device *dev = napi->dev; - const struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - int work_done = 0; - - if (reg_sr & get_irq_mb_rx(priv)) - 
work_done += at91_poll_rx(dev, quota - work_done); - - /* The error bits are clear on read, - * so use saved value from irq handler. - */ - reg_sr |= priv->reg_sr; - if (reg_sr & AT91_IRQ_ERR_FRAME) - work_done += at91_poll_err(dev, quota - work_done, reg_sr); - - if (work_done < quota) { - /* enable IRQs for frame errors and all mailboxes >= rx_next */ - u32 reg_ier = AT91_IRQ_ERR_FRAME; + at91_rx_overflow_err(offload->dev); - reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); + mark_as_read: + at91_write(priv, AT91_MCR(mb), AT91_MCR_MTCR); - napi_complete_done(napi, work_done); - at91_write(priv, AT91_IER, reg_ier); - } - - return work_done; + return skb; } /* theory of operation: * - * priv->tx_echo holds the number of the oldest can_frame put for + * priv->tx_tail holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx * complete IRQ. * - * We iterate from priv->tx_echo to priv->tx_next and check if the + * We iterate from priv->tx_tail to priv->tx_head and check if the * packet has been transmitted, echo it back to the CAN framework. If * we discover a not yet transmitted package, stop looking for more. * @@ -826,10 +654,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) u32 reg_msr; unsigned int mb; - /* masking of reg_sr not needed, already done by at91_irq */ - - for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { - mb = get_tx_echo_mb(priv); + for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) { + mb = get_tx_tail_mb(priv); /* no event in mailbox? */ if (!(reg_sr & (1 << mb))) @@ -844,236 +670,202 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) * parked in the echo queue. */ reg_msr = at91_read(priv, AT91_MSR(mb)); - if (likely(reg_msr & AT91_MSR_MRDY && - ~reg_msr & AT91_MSR_MABT)) { - /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ - dev->stats.tx_bytes += - can_get_echo_skb(dev, - mb - get_mb_tx_first(priv), - NULL); - dev->stats.tx_packets++; - } + if (unlikely(!(reg_msr & AT91_MSR_MRDY && + ~reg_msr & AT91_MSR_MABT))) + continue; + + /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ + dev->stats.tx_bytes += + can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL); + dev->stats.tx_packets++; } /* restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. 
*/ - if ((priv->tx_next & get_next_mask(priv)) != 0 || - (priv->tx_echo & get_next_mask(priv)) == 0) + if ((priv->tx_head & get_head_mask(priv)) != 0 || + (priv->tx_tail & get_head_mask(priv)) == 0) netif_wake_queue(dev); } -static void at91_irq_err_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) +static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr) { + struct net_device_stats *stats = &dev->stats; + enum can_state new_state, rx_state, tx_state; struct at91_priv *priv = netdev_priv(dev); - u32 reg_idr = 0, reg_ier = 0; struct can_berr_counter bec; + struct sk_buff *skb; + struct can_frame *cf; + u32 timestamp; + int err; at91_get_berr_counter(dev, &bec); + can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state); - switch (priv->can.state) { - case CAN_STATE_ERROR_ACTIVE: - /* from: ERROR_ACTIVE - * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF - * => : there was a warning int - */ - if (new_state >= CAN_STATE_ERROR_WARNING && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Warning IRQ\n"); - priv->can.can_stats.error_warning++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } - fallthrough; - case CAN_STATE_ERROR_WARNING: - /* from: ERROR_ACTIVE, ERROR_WARNING - * to : ERROR_PASSIVE, BUS_OFF - * => : error passive int - */ - if (new_state >= CAN_STATE_ERROR_PASSIVE && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Passive IRQ\n"); - priv->can.can_stats.error_passive++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - break; - case CAN_STATE_BUS_OFF: - /* from: BUS_OFF - * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE - */ - if (new_state <= CAN_STATE_ERROR_PASSIVE) { - cf->can_id |= CAN_ERR_RESTARTED; + /* The chip automatically recovers from bus-off after 128 + * occurrences of 11 consecutive recessive bits. + * + * After an auto-recovered bus-off, the error counters no + * longer reflect this fact. On the sam9263 the state bits in + * the SR register show the current state (based on the + * current error counters), while on sam9x5 and newer SoCs + * these bits are latched. + * + * Take any latched bus-off information from the SR register + * into account when calculating the CAN new state, to start + * the standard CAN bus off handling. + */ + if (reg_sr & AT91_IRQ_BOFF) + rx_state = CAN_STATE_BUS_OFF; - netdev_dbg(dev, "restarted\n"); - priv->can.can_stats.restarts++; + new_state = max(tx_state, rx_state); - netif_carrier_on(dev); - netif_wake_queue(dev); - } - break; - default: - break; - } + /* state hasn't changed */ + if (likely(new_state == priv->can.state)) + return; - /* process state changes depending on the new state */ - switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: - /* actually we want to enable AT91_IRQ_WARN here, but - * it screws up the system under certain - * circumstances. 
so just enable AT91_IRQ_ERRP, thus - * the "fallthrough" - */ - netdev_dbg(dev, "Error Active\n"); - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - fallthrough; - case CAN_STATE_ERROR_WARNING: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = AT91_IRQ_ERRP; - break; - case CAN_STATE_ERROR_PASSIVE: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; - reg_ier = AT91_IRQ_BOFF; - break; - case CAN_STATE_BUS_OFF: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | - AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = 0; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + can_change_state(dev, cf, tx_state, rx_state); - cf->can_id |= CAN_ERR_BUSOFF; + if (new_state == CAN_STATE_BUS_OFF) { + at91_chip_stop(dev, CAN_STATE_BUS_OFF); + can_bus_off(dev); + } - netdev_dbg(dev, "bus-off\n"); - netif_carrier_off(dev); - priv->can.can_stats.bus_off++; + if (unlikely(!skb)) + return; - /* turn off chip, if restart is disabled */ - if (!priv->can.restart_ms) { - at91_chip_stop(dev, CAN_STATE_BUS_OFF); - return; - } - break; - default: - break; + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; } - at91_write(priv, AT91_IDR, reg_idr); - at91_write(priv, AT91_IER, reg_ier); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } -static int at91_get_state_by_bec(const struct net_device *dev, - enum can_state *state) +static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr) { - struct can_berr_counter bec; + struct net_device_stats *stats = &dev->stats; + struct at91_priv *priv = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp; int err; - err = at91_get_berr_counter(dev, &bec); - if (err) - return err; + priv->can.can_stats.bus_error++; - if (bec.txerr < 96 && bec.rxerr < 96) - *state = CAN_STATE_ERROR_ACTIVE; - else if (bec.txerr < 128 && bec.rxerr < 128) - *state = CAN_STATE_ERROR_WARNING; - else if (bec.txerr < 256 && bec.rxerr < 256) - *state = CAN_STATE_ERROR_PASSIVE; - else - *state = CAN_STATE_BUS_OFF; + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + if (cf) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - return 0; -} + if (reg_sr & AT91_IRQ_CERR) { + netdev_dbg(dev, "CRC error\n"); -static void at91_irq_err(struct net_device *dev) -{ - struct at91_priv *priv = netdev_priv(dev); - struct sk_buff *skb; - struct can_frame *cf; - enum can_state new_state; - u32 reg_sr; - int err; + stats->rx_errors++; + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } + + if (reg_sr & AT91_IRQ_SERR) { + netdev_dbg(dev, "Stuff error\n"); + + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } - if (at91_is_sam9263(priv)) { - reg_sr = at91_read(priv, AT91_SR); - - /* we need to look at the unmasked reg_sr */ - if (unlikely(reg_sr & AT91_IRQ_BOFF)) { - new_state = CAN_STATE_BUS_OFF; - } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) { - new_state = CAN_STATE_ERROR_PASSIVE; - } else if (unlikely(reg_sr & AT91_IRQ_WARN)) { - new_state = CAN_STATE_ERROR_WARNING; - } else if (likely(reg_sr & AT91_IRQ_ERRA)) { - new_state = CAN_STATE_ERROR_ACTIVE; - } else { - netdev_err(dev, "BUG! 
hardware in undefined state\n"); - return; + if (reg_sr & AT91_IRQ_AERR) { + netdev_dbg(dev, "NACK error\n"); + + stats->tx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_ACK; + cf->data[2] |= CAN_ERR_PROT_TX; } - } else { - err = at91_get_state_by_bec(dev, &new_state); - if (err) - return; } - /* state hasn't changed */ - if (likely(new_state == priv->can.state)) - return; + if (reg_sr & AT91_IRQ_FERR) { + netdev_dbg(dev, "Format error\n"); - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + } + + if (reg_sr & AT91_IRQ_BERR) { + netdev_dbg(dev, "Bit error\n"); + + stats->tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT; + } + + if (!cf) return; - at91_irq_err_state(dev, cf, new_state); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; +} + +static u32 at91_get_reg_sr_rx(const struct at91_priv *priv, u32 *reg_sr_p) +{ + const u32 reg_sr = at91_read(priv, AT91_SR); - netif_rx(skb); + *reg_sr_p |= reg_sr; - priv->can.state = new_state; + return reg_sr & get_irq_mb_rx(priv); } -/* interrupt handler - */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; - u32 reg_sr, reg_imr; + u32 reg_sr = 0, reg_sr_rx; + int ret; - reg_sr = at91_read(priv, AT91_SR); - reg_imr = at91_read(priv, AT91_IMR); - - /* Ignore masked interrupts */ - reg_sr &= reg_imr; - if (!reg_sr) - goto exit; - - handled = IRQ_HANDLED; + /* Receive interrupt + * Some bits of AT91_SR are cleared on read, keep them in reg_sr. + */ + while ((reg_sr_rx = at91_get_reg_sr_rx(priv, ®_sr))) { + ret = can_rx_offload_irq_offload_timestamp(&priv->offload, + reg_sr_rx); + handled = IRQ_HANDLED; - /* Receive or error interrupt? -> napi */ - if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { - /* The error bits are clear on read, - * save for later use. 
- */ - priv->reg_sr = reg_sr; - at91_write(priv, AT91_IDR, - get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME); - napi_schedule(&priv->napi); + if (!ret) + break; } /* Transmission complete interrupt */ - if (reg_sr & get_irq_mb_tx(priv)) + if (reg_sr & get_irq_mb_tx(priv)) { at91_irq_tx(dev, reg_sr); + handled = IRQ_HANDLED; + } - at91_irq_err(dev); + /* Line Error interrupt */ + if (reg_sr & AT91_IRQ_ERR_LINE || + priv->can.state > CAN_STATE_ERROR_ACTIVE) { + at91_irq_err_line(dev, reg_sr); + handled = IRQ_HANDLED; + } + + /* Frame Error Interrupt */ + if (reg_sr & AT91_IRQ_ERR_FRAME) { + at91_irq_err_frame(dev, reg_sr); + handled = IRQ_HANDLED; + } + + if (handled) + can_rx_offload_irq_finish(&priv->offload); - exit: return handled; } @@ -1082,33 +874,38 @@ static int at91_open(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk); + err = phy_power_on(priv->transceiver); if (err) return err; /* check or determine and set bittime */ err = open_candev(dev); if (err) - goto out; + goto out_phy_power_off; + + err = clk_prepare_enable(priv->clk); + if (err) + goto out_close_candev; /* register interrupt handler */ - if (request_irq(dev->irq, at91_irq, IRQF_SHARED, - dev->name, dev)) { - err = -EAGAIN; - goto out_close; - } + err = request_irq(dev->irq, at91_irq, IRQF_SHARED, + dev->name, dev); + if (err) + goto out_clock_disable_unprepare; /* start chip and queuing */ at91_chip_start(dev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; - out_close: - close_candev(dev); - out: + out_clock_disable_unprepare: clk_disable_unprepare(priv->clk); + out_close_candev: + close_candev(dev); + out_phy_power_off: + phy_power_off(priv->transceiver); return err; } @@ -1120,11 +917,12 @@ static int at91_close(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk); + phy_power_off(priv->transceiver); close_candev(dev); @@ -1249,6 +1047,7 @@ static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_ static int at91_can_probe(struct platform_device *pdev) { const struct at91_devtype_data *devtype_data; + struct phy *transceiver; struct net_device *dev; struct at91_priv *priv; struct resource *res; @@ -1297,6 +1096,13 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_iounmap; } + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + err = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, err, "failed to get phy\n"); + goto exit_iounmap; + } + dev->netdev_ops = &at91_netdev_ops; dev->ethtool_ops = &at91_ethtool_ops; dev->irq = irq; @@ -1314,8 +1120,14 @@ static int at91_can_probe(struct platform_device *pdev) priv->clk = clk; priv->pdata = dev_get_platdata(&pdev->dev); priv->mb0_id = 0x7ff; + priv->offload.mailbox_read = at91_mailbox_read; + priv->offload.mb_first = devtype_data->rx_first; + priv->offload.mb_last = devtype_data->rx_last; + + can_rx_offload_add_timestamp(dev, &priv->offload); - netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 735d5de3ca..3a3be5cdfc 100644 --- 
a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -90,6 +90,28 @@ const char *can_get_state_str(const enum can_state state) } EXPORT_SYMBOL_GPL(can_get_state_str); +static enum can_state can_state_err_to_state(u16 err) +{ + if (err < CAN_ERROR_WARNING_THRESHOLD) + return CAN_STATE_ERROR_ACTIVE; + if (err < CAN_ERROR_PASSIVE_THRESHOLD) + return CAN_STATE_ERROR_WARNING; + if (err < CAN_BUS_OFF_THRESHOLD) + return CAN_STATE_ERROR_PASSIVE; + + return CAN_STATE_BUS_OFF; +} + +void can_state_get_by_berr_counter(const struct net_device *dev, + const struct can_berr_counter *bec, + enum can_state *tx_state, + enum can_state *rx_state) +{ + *tx_state = can_state_err_to_state(bec->txerr); + *rx_state = can_state_err_to_state(bec->rxerr); +} +EXPORT_SYMBOL_GPL(can_state_get_by_berr_counter); + void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state) { @@ -142,23 +164,20 @@ static void can_restart(struct net_device *dev) /* send restart message upstream */ skb = alloc_can_err_skb(dev, &cf); - if (!skb) - goto restart; - - cf->can_id |= CAN_ERR_RESTARTED; - - netif_rx(skb); - -restart: - netdev_dbg(dev, "restarted\n"); - priv->can_stats.restarts++; + if (skb) { + cf->can_id |= CAN_ERR_RESTARTED; + netif_rx(skb); + } /* Now restart the device */ netif_carrier_on(dev); err = priv->do_set_mode(dev, CAN_MODE_START); if (err) { - netdev_err(dev, "Error %d during restart", err); + netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); netif_carrier_off(dev); + } else { + netdev_dbg(dev, "Restarted\n"); + priv->can_stats.restarts++; } } diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 036d85ef07..dfdc039d92 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* Neither of TDC parameters nor TDC flags are * provided: do calculation */ - can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, + can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, &priv->ctrlmode, priv->ctrlmode_supported); } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly * turned off. 
TDC is disabled: do nothing diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 77091f7d1f..46e7b6db4a 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -67,7 +67,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) /* Check if there was another interrupt */ if (!skb_queue_empty(&offload->skb_queue)) - napi_reschedule(&offload->napi); + napi_schedule(&offload->napi); } return work_done; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 84f34020aa..da396d641e 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -462,7 +462,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) card->led_chip.owner = THIS_MODULE; card->led_chip.dev.parent = &pdev->dev; card->led_chip.algo_data = &card->i2c_bit; - strncpy(card->led_chip.name, "peak_i2c", + strscpy(card->led_chip.name, "peak_i2c", sizeof(card->led_chip.name)); card->i2c_bit = peak_pciec_i2c_bit_ops; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 743c2eb62b..ddb3247948 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -206,7 +206,7 @@ static void sja1000_start(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); - /* leave reset mode */ + /* enter reset mode */ if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 4e27dc913c..0d628b35fd 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -757,7 +757,7 @@ int b53_configure_vlan(struct dsa_switch *ds) /* Create an untagged VLAN entry for the default PVID in case * CONFIG_VLAN_8021Q is disabled and there are no calls to - * dsa_slave_vlan_rx_add_vid() to create the default VLAN + * dsa_user_vlan_rx_add_vid() to create the default VLAN * entry. 
Do this only when the tagging protocol is not * DSA_TAG_PROTO_NONE */ @@ -958,7 +958,7 @@ static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) return NULL; } - return mdiobus_get_phy(ds->slave_mii_bus, port); + return mdiobus_get_phy(ds->user_mii_bus, port); } void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 4d55d8d183..897e5e8b3d 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -329,7 +329,7 @@ static int b53_mdio_probe(struct mdio_device *mdiodev) * layer setup */ if (of_machine_is_compatible("brcm,bcm7445d0") && - strcmp(mdiodev->bus->name, "sf2 slave mii")) + strcmp(mdiodev->bus->name, "sf2 user mii")) return -EPROBE_DEFER; dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 5e39641ea8..3a89349dc9 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -324,14 +324,12 @@ static int b53_mmap_probe(struct platform_device *pdev) return b53_switch_register(dev); } -static int b53_mmap_remove(struct platform_device *pdev) +static void b53_mmap_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); if (dev) b53_switch_remove(dev); - - return 0; } static void b53_mmap_shutdown(struct platform_device *pdev) @@ -372,7 +370,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table); static struct platform_driver b53_mmap_driver = { .probe = b53_mmap_probe, - .remove = b53_mmap_remove, + .remove_new = b53_mmap_remove, .shutdown = b53_mmap_shutdown, .driver = { .name = "b53-switch", diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index bcb4403440..f3f95332ff 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -657,17 +657,15 @@ static int b53_srab_probe(struct platform_device *pdev) return b53_switch_register(dev); } -static int b53_srab_remove(struct platform_device *pdev) +static void b53_srab_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); if (!dev) - return 0; + return; b53_srab_intr_set(dev->priv, false); b53_switch_remove(dev); - - return 0; } static void b53_srab_shutdown(struct platform_device *pdev) @@ -684,7 +682,7 @@ static void b53_srab_shutdown(struct platform_device *pdev) static struct platform_driver b53_srab_driver = { .probe = b53_srab_probe, - .remove = b53_srab_remove, + .remove_new = b53_srab_remove, .shutdown = b53_srab_shutdown, .driver = { .name = "b53-srab-switch", diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index cd1f240c90..cadee5505c 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -623,19 +623,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->master_mii_dn = dn; - priv->slave_mii_bus = mdiobus_alloc(); - if (!priv->slave_mii_bus) { + priv->user_mii_bus = mdiobus_alloc(); + if (!priv->user_mii_bus) { err = -ENOMEM; goto err_put_master_mii_bus_dev; } - priv->slave_mii_bus->priv = priv; - priv->slave_mii_bus->name = "sf2 slave mii"; - priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read; - priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write; - snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", + priv->user_mii_bus->priv = priv; + priv->user_mii_bus->name = "sf2 user mii"; + priv->user_mii_bus->read = bcm_sf2_sw_mdio_read; + priv->user_mii_bus->write = bcm_sf2_sw_mdio_write; + 
snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", index++); - priv->slave_mii_bus->dev.of_node = dn; + priv->user_mii_bus->dev.of_node = dn; /* Include the pseudo-PHY address to divert reads towards our * workaround. This is only required for 7445D0, since 7445E0 @@ -653,9 +653,9 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->indir_phy_mask = 0; ds->phys_mii_mask = priv->indir_phy_mask; - ds->slave_mii_bus = priv->slave_mii_bus; - priv->slave_mii_bus->parent = ds->dev->parent; - priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; + ds->user_mii_bus = priv->user_mii_bus; + priv->user_mii_bus->parent = ds->dev->parent; + priv->user_mii_bus->phy_mask = ~priv->indir_phy_mask; /* We need to make sure that of_phy_connect() will not work by * removing the 'phandle' and 'linux,phandle' properties and @@ -682,14 +682,14 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) phy_device_remove(phydev); } - err = mdiobus_register(priv->slave_mii_bus); + err = mdiobus_register(priv->user_mii_bus); if (err && dn) - goto err_free_slave_mii_bus; + goto err_free_user_mii_bus; return 0; -err_free_slave_mii_bus: - mdiobus_free(priv->slave_mii_bus); +err_free_user_mii_bus: + mdiobus_free(priv->user_mii_bus); err_put_master_mii_bus_dev: put_device(&priv->master_mii_bus->dev); err_of_node_put: @@ -699,10 +699,9 @@ err_of_node_put: static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { - mdiobus_unregister(priv->slave_mii_bus); - mdiobus_free(priv->slave_mii_bus); + mdiobus_unregister(priv->user_mii_bus); + mdiobus_free(priv->user_mii_bus); put_device(&priv->master_mii_bus->dev); - of_node_put(priv->master_mii_dn); } static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) @@ -915,7 +914,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, * state machine and make it go in PHY_FORCING state instead. 
*/ if (!status->link) - netif_carrier_off(dsa_to_port(ds, port)->slave); + netif_carrier_off(dsa_to_port(ds, port)->user); status->duplex = DUPLEX_FULL; } else { status->link = true; @@ -989,7 +988,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol = { }; @@ -1013,7 +1012,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; struct ethtool_wolinfo pwol = { }; @@ -1543,12 +1542,12 @@ out_clk: return ret; } -static int bcm_sf2_sw_remove(struct platform_device *pdev) +static void bcm_sf2_sw_remove(struct platform_device *pdev) { struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); if (!priv) - return 0; + return; priv->wol_ports_mask = 0; /* Disable interrupts */ @@ -1560,8 +1559,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); if (priv->type == BCM7278_DEVICE_ID) reset_control_assert(priv->rcdev); - - return 0; } static void bcm_sf2_sw_shutdown(struct platform_device *pdev) @@ -1607,7 +1604,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, - .remove = bcm_sf2_sw_remove, + .remove_new = bcm_sf2_sw_remove, .shutdown = bcm_sf2_sw_shutdown, .driver = { .name = "brcm-sf2", diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 00afc94ce5..424f896b5a 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -108,7 +108,7 @@ struct bcm_sf2_priv { /* Master and slave MDIO bus controller */ unsigned int indir_phy_mask; struct device_node *master_mii_dn; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; struct mii_bus *master_mii_bus; /* Bitmask of ports needing BRCM tags */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index c4010b7bf0..c88ee3dd42 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; @@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 5b139f2206..c70ed67cc1 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -277,6 +277,14 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port) return ETH_MAX_MTU; } +static void 
dsa_loop_phylink_get_caps(struct dsa_switch *dsa, int port, + struct phylink_config *config) +{ + bitmap_fill(config->supported_interfaces, PHY_INTERFACE_MODE_MAX); + __clear_bit(PHY_INTERFACE_MODE_NA, config->supported_interfaces); + config->mac_capabilities = ~0; +} + static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, @@ -295,6 +303,7 @@ static const struct dsa_switch_ops dsa_loop_driver = { .port_vlan_del = dsa_loop_port_vlan_del, .port_change_mtu = dsa_loop_port_change_mtu, .port_max_mtu = dsa_loop_port_max_mtu, + .phylink_get_caps = dsa_loop_phylink_get_caps, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 11ef1d7ea2..beda1e9d35 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -2060,18 +2060,16 @@ err_ptp_setup: return ret; } -static int hellcreek_remove(struct platform_device *pdev) +static void hellcreek_remove(struct platform_device *pdev) { struct hellcreek *hellcreek = platform_get_drvdata(pdev); if (!hellcreek) - return 0; + return; hellcreek_hwtstamp_free(hellcreek); hellcreek_ptp_free(hellcreek); dsa_unregister_switch(hellcreek->ds); - - return 0; } static void hellcreek_shutdown(struct platform_device *pdev) @@ -2107,7 +2105,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match); static struct platform_driver hellcreek_driver = { .probe = hellcreek_probe, - .remove = hellcreek_remove, + .remove_new = hellcreek_remove, .shutdown = hellcreek_shutdown, .driver = { .name = "hellcreek", diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index ee67adeb2c..fcb20eac33 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1084,7 +1084,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, if (!dsa_port_is_user(dp)) return 0; - vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port); + vlan_vid_add(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); return lan9303_enable_processing_port(chip, port); } @@ -1097,7 +1097,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) if (!dsa_port_is_user(dp)) return; - vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port); + vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 3c76a1a14a..9c185c9f09 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -510,22 +510,22 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) struct dsa_switch *ds = priv->ds; int err; - ds->slave_mii_bus = mdiobus_alloc(); - if (!ds->slave_mii_bus) + ds->user_mii_bus = mdiobus_alloc(); + if (!ds->user_mii_bus) return -ENOMEM; - ds->slave_mii_bus->priv = priv; - ds->slave_mii_bus->read = gswip_mdio_rd; - ds->slave_mii_bus->write = gswip_mdio_wr; - ds->slave_mii_bus->name = "lantiq,xrx200-mdio"; - snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", + ds->user_mii_bus->priv = priv; + ds->user_mii_bus->read = gswip_mdio_rd; + ds->user_mii_bus->write = gswip_mdio_wr; + ds->user_mii_bus->name = "lantiq,xrx200-mdio"; + snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); - ds->slave_mii_bus->parent = priv->dev; - ds->slave_mii_bus->phy_mask = 
~ds->phys_mii_mask; + ds->user_mii_bus->parent = priv->dev; + ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask; - err = of_mdiobus_register(ds->slave_mii_bus, mdio_np); + err = of_mdiobus_register(ds->user_mii_bus, mdio_np); if (err) - mdiobus_free(ds->slave_mii_bus); + mdiobus_free(ds->user_mii_bus); return err; } @@ -1759,8 +1759,7 @@ static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) - strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", gswip_rmon_cnt[i].name); } static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, @@ -2197,8 +2196,8 @@ disable_switch: dsa_unregister_switch(priv->ds); mdio_bus: if (mdio_np) { - mdiobus_unregister(priv->ds->slave_mii_bus); - mdiobus_free(priv->ds->slave_mii_bus); + mdiobus_unregister(priv->ds->user_mii_bus); + mdiobus_free(priv->ds->user_mii_bus); } put_mdio_node: of_node_put(mdio_np); @@ -2207,29 +2206,27 @@ put_mdio_node: return err; } -static int gswip_remove(struct platform_device *pdev) +static void gswip_remove(struct platform_device *pdev) { struct gswip_priv *priv = platform_get_drvdata(pdev); int i; if (!priv) - return 0; + return; /* disable the switch */ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); - if (priv->ds->slave_mii_bus) { - mdiobus_unregister(priv->ds->slave_mii_bus); - of_node_put(priv->ds->slave_mii_bus->dev.of_node); - mdiobus_free(priv->ds->slave_mii_bus); + if (priv->ds->user_mii_bus) { + mdiobus_unregister(priv->ds->user_mii_bus); + of_node_put(priv->ds->user_mii_bus->dev.of_node); + mdiobus_free(priv->ds->user_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); - - return 0; } static void gswip_shutdown(struct platform_device *pdev) @@ -2266,7 +2263,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match); static struct platform_driver gswip_driver = { .probe = gswip_probe, - .remove = gswip_remove, + .remove_new = gswip_remove, .shutdown = gswip_shutdown, .driver = { .name = "gswip", diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 48360cc9fc..49459a50db 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o ksz_switch-objs := ksz_common.o -ksz_switch-objs += ksz9477.o +ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o ksz_switch-objs += ksz8795.o ksz_switch-objs += lan937x_main.o diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 91aba470fb..4bf4d67557 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -632,6 +632,50 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) ksz8_w_table(dev, TABLE_VLAN, addr, buf); } +/** + * ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be read. + * @val: The value read from the SMI interface. + * + * This function reads the SMI interface and translates the hardware register + * bit values into their corresponding control settings for a MIIM PHY Control + * register. + * + * Return: 0 on success, error code on failure. 
+ */ +static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val) +{ + const u16 *regs = dev->info->regs; + u8 reg_val; + int ret; + + *val = 0; + + ret = ksz_pread8(dev, port, regs[P_LINK_STATUS], &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_MDIX_STATUS) + *val |= KSZ886X_CTRL_MDIX_STAT; + + ret = ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_FORCE_LINK) + *val |= KSZ886X_CTRL_FORCE_LINK; + + if (reg_val & PORT_POWER_SAVING) + *val |= KSZ886X_CTRL_PWRSAVE; + + if (reg_val & PORT_PHY_REMOTE_LOOPBACK) + *val |= KSZ886X_CTRL_REMOTE_LOOPBACK; + + return 0; +} + int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) { u8 restart, speed, ctrl, link; @@ -769,12 +813,10 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2)); break; case PHY_REG_PHY_CTRL: - ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link); + ret = ksz8_r_phy_ctrl(dev, p, &data); if (ret) return ret; - if (link & PORT_MDIX_STATUS) - data |= KSZ886X_CTRL_MDIX_STAT; break; default: processed = false; @@ -786,6 +828,38 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) return 0; } +/** + * ksz8_w_phy_ctrl - Translates and writes to the SMI interface from a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be configured. + * @val: The register value to be written. + * + * This function translates control settings from a MIIM PHY Control register + * into their corresponding hardware register bit values for the SMI + * interface. + * + * Return: 0 on success, error code on failure. + */ +static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val) +{ + u8 reg_val = 0; + int ret; + + if (val & KSZ886X_CTRL_FORCE_LINK) + reg_val |= PORT_FORCE_LINK; + + if (val & KSZ886X_CTRL_PWRSAVE) + reg_val |= PORT_POWER_SAVING; + + if (val & KSZ886X_CTRL_REMOTE_LOOPBACK) + reg_val |= PORT_PHY_REMOTE_LOOPBACK; + + ret = ksz_prmw8(dev, port, REG_PORT_LINK_MD_CTRL, PORT_FORCE_LINK | + PORT_POWER_SAVING | PORT_PHY_REMOTE_LOOPBACK, reg_val); + return ret; +} + int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) { u8 restart, speed, ctrl, data; @@ -926,6 +1000,12 @@ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) if (val & PHY_START_CABLE_DIAG) ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true); break; + + case PHY_REG_PHY_CTRL: + ret = ksz8_w_phy_ctrl(dev, p, val); + if (ret) + return ret; + break; default: break; } diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index 7a57c6088f..3c9dae53e4 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -323,13 +323,6 @@ ((addr) + REG_PORT_1_CTRL_0 + (port) * \ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0)) -#define REG_SW_MAC_ADDR_0 0x68 -#define REG_SW_MAC_ADDR_1 0x69 -#define REG_SW_MAC_ADDR_2 0x6A -#define REG_SW_MAC_ADDR_3 0x6B -#define REG_SW_MAC_ADDR_4 0x6C -#define REG_SW_MAC_ADDR_5 0x6D - #define TABLE_EXT_SELECT_S 5 #define TABLE_EEE_V 1 #define TABLE_ACL_V 2 @@ -442,20 +435,6 @@ #define TOS_PRIO_M KS_PRIO_M #define TOS_PRIO_S KS_PRIO_S -#define REG_SW_CTRL_20 0xA3 - -#define SW_GMII_DRIVE_STRENGTH_S 4 -#define SW_DRIVE_STRENGTH_M 0x7 -#define SW_DRIVE_STRENGTH_2MA 0 -#define SW_DRIVE_STRENGTH_4MA 1 -#define SW_DRIVE_STRENGTH_8MA 2 -#define SW_DRIVE_STRENGTH_12MA 3 -#define SW_DRIVE_STRENGTH_16MA 4 -#define 
SW_DRIVE_STRENGTH_20MA 5 -#define SW_DRIVE_STRENGTH_24MA 6 -#define SW_DRIVE_STRENGTH_28MA 7 -#define SW_MII_DRIVE_STRENGTH_S 0 - #define REG_SW_CTRL_21 0xA4 #define SW_IPV6_MLD_OPTION BIT(3) diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 83b7f2d5c1..7f745628c8 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -56,6 +56,187 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu) REG_SW_MTU_MASK, frame_size); } +/** + * ksz9477_handle_wake_reason - Handle wake reason on a specified port. + * @dev: The device structure. + * @port: The port number. + * + * This function reads the PME (Power Management Event) status register of a + * specified port to determine the wake reason. If there is no wake event, it + * returns early. Otherwise, it logs the wake reason which could be due to a + * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register + * is then cleared to acknowledge the handling of the wake event. + * + * Return: 0 on success, or an error code on failure. + */ +static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port) +{ + u8 pme_status; + int ret; + + ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status); + if (ret) + return ret; + + if (!pme_status) + return 0; + + dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, + pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "", + pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "", + pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : ""); + + return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status); +} + +/** + * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port. + * @dev: The device structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function checks the PME Pin Control Register to see if PME Pin Output + * Enable is set, indicating PME is enabled. If enabled, it sets the supported + * and active WoL flags. + */ +void ksz9477_get_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol) +{ + u8 pme_ctrl; + int ret; + + if (!dev->wakeup_source) + return; + + wol->supported = WAKE_PHY; + + /* Check if the current MAC address on this port can be set + * as global for WAKE_MAGIC support. The result may vary + * dynamically based on other ports configurations. + */ + if (ksz_is_port_mac_global_usable(dev->ds, port)) + wol->supported |= WAKE_MAGIC; + + ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl); + if (ret) + return; + + if (pme_ctrl & PME_WOL_MAGICPKT) + wol->wolopts |= WAKE_MAGIC; + if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY)) + wol->wolopts |= WAKE_PHY; +} + +/** + * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port. + * @dev: The device structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function configures Wake-on-LAN (WoL) settings for a specified port. + * It validates the provided WoL options, checks if PME is enabled via the + * switch's PME Pin Control Register, clears any previous wake reasons, + * and sets the Magic Packet flag in the port's PME control register if + * specified. + * + * Return: 0 on success, or other error codes on failure. 
+ */ +int ksz9477_set_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol) +{ + u8 pme_ctrl = 0, pme_ctrl_old = 0; + bool magic_switched_off; + bool magic_switched_on; + int ret; + + if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + + if (!dev->wakeup_source) + return -EOPNOTSUPP; + + ret = ksz9477_handle_wake_reason(dev, port); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + pme_ctrl |= PME_WOL_MAGICPKT; + if (wol->wolopts & WAKE_PHY) + pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY; + + ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old); + if (ret) + return ret; + + if (pme_ctrl_old == pme_ctrl) + return 0; + + magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) && + !(pme_ctrl & PME_WOL_MAGICPKT); + magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) && + (pme_ctrl & PME_WOL_MAGICPKT); + + /* To keep reference count of MAC address, we should do this + * operation only on change of WOL settings. + */ + if (magic_switched_on) { + ret = ksz_switch_macaddr_get(dev->ds, port, NULL); + if (ret) + return ret; + } else if (magic_switched_off) { + ksz_switch_macaddr_put(dev->ds); + } + + ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl); + if (ret) { + if (magic_switched_on) + ksz_switch_macaddr_put(dev->ds); + return ret; + } + + return 0; +} + +/** + * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while + * considering Wake-on-LAN (WoL) settings. + * @dev: The switch device structure. + * @wol_enabled: Pointer to a boolean which will be set to true if WoL is + * enabled on any port. + * + * This function prepares the switch device for a safe shutdown while taking + * into account the Wake-on-LAN (WoL) settings on the user ports. It updates + * the wol_enabled flag accordingly to reflect whether WoL is active on any + * port. + */ +void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled) +{ + struct dsa_port *dp; + int ret; + + *wol_enabled = false; + + if (!dev->wakeup_source) + return; + + dsa_switch_for_each_user_port(dp, dev->ds) { + u8 pme_ctrl = 0; + + ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl); + if (!ret && pme_ctrl) + *wol_enabled = true; + + /* make sure there are no pending wake events which would + * prevent the device from going to sleep/shutdown. + */ + ksz9477_handle_wake_reason(dev, dp->index); + } + + /* Now we are save to enable PME pin. */ + if (*wol_enabled) + ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE); +} + static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev) { unsigned int val; @@ -1004,6 +1185,16 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* clear pending interrupts */ if (dev->info->internal_phy[port]) ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16); + + ksz9477_port_acl_init(dev, port); + + /* clear pending wake flags */ + ksz9477_handle_wake_reason(dev, port); + + /* Disable all WoL options by default. Otherwise + * ksz_switch_macaddr_get/put logic will not work properly. + */ + ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0); } void ksz9477_config_cpu_port(struct dsa_switch *ds) @@ -1126,6 +1317,12 @@ int ksz9477_setup(struct dsa_switch *ds) /* enable global MIB counter freeze function */ ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); + /* Make sure PME (WoL) is not enabled. If requested, it will be + * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not + * like PME events changes before shutdown. 
+ */ + ksz_write8(dev, REG_SW_PME_CTRL, 0); + return 0; } @@ -1141,6 +1338,83 @@ int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val) return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val); } +/* The KSZ9477 provides following HW features to accelerate + * HSR frames handling: + * + * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH + * 2. RX PACKET DUPLICATION DISCARDING + * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING + * + * Only one from point 1. has the NETIF_F* flag available. + * + * Ones from point 2 and 3 are "best effort" - i.e. those will + * work correctly most of the time, but it may happen that some + * frames will not be caught - to be more specific; there is a race + * condition in hardware such that, when duplicate packets are received + * on member ports very close in time to each other, the hardware fails + * to detect that they are duplicates. + * + * Hence, the SW needs to handle those special cases. However, the speed + * up gain is considerable when above features are used. + * + * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames + * can be forwarded in the switch fabric between HSR ports. + */ +#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD) + +void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + struct net_device *user; + struct dsa_port *hsr_dp; + u8 data, hsr_ports = 0; + + /* Program which port(s) shall support HSR */ + ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port)); + + /* Forward frames between HSR ports (i.e. bridge together HSR ports) */ + if (dev->hsr_ports) { + dsa_hsr_foreach_port(hsr_dp, ds, hsr) + hsr_ports |= BIT(hsr_dp->index); + + hsr_ports |= BIT(dsa_upstream_port(ds, port)); + dsa_hsr_foreach_port(hsr_dp, ds, hsr) + ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports); + } + + if (!dev->hsr_ports) { + /* Enable discarding of received HSR frames */ + ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data); + data |= HSR_DUPLICATE_DISCARD; + data &= ~HSR_NODE_UNICAST; + ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data); + } + + /* Enable per port self-address filtering. + * The global self-address filtering has already been enabled in the + * ksz9477_reset_switch() function. 
+ */ + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true); + + /* Setup HW supported features for lan HSR ports */ + user = dsa_to_port(ds, port)->user; + user->features |= KSZ9477_SUPPORTED_HSR_FEATURES; +} + +void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + + /* Clear port HSR support */ + ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0); + + /* Disable forwarding frames between HSR ports */ + ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port))); + + /* Disable per port self-address filtering */ + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false); +} + int ksz9477_switch_init(struct ksz_device *dev) { u8 data8; diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h index a6f425866a..ce1e656b80 100644 --- a/drivers/net/dsa/microchip/ksz9477.h +++ b/drivers/net/dsa/microchip/ksz9477.h @@ -56,5 +56,48 @@ int ksz9477_reset_switch(struct ksz_device *dev); int ksz9477_switch_init(struct ksz_device *dev); void ksz9477_switch_exit(struct ksz_device *dev); void ksz9477_port_queue_split(struct ksz_device *dev, int port); +void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr); +void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr); +void ksz9477_get_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); +int ksz9477_set_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); +void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled); + +int ksz9477_port_acl_init(struct ksz_device *dev, int port); +void ksz9477_port_acl_free(struct ksz_device *dev, int port); +int ksz9477_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); +int ksz9477_cls_flower_del(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); + +#define KSZ9477_ACL_ENTRY_SIZE 18 +#define KSZ9477_ACL_MAX_ENTRIES 16 + +struct ksz9477_acl_entry { + u8 entry[KSZ9477_ACL_ENTRY_SIZE]; + unsigned long cookie; + u32 prio; +}; + +struct ksz9477_acl_entries { + struct ksz9477_acl_entry entries[KSZ9477_ACL_MAX_ENTRIES]; + int entries_count; +}; + +struct ksz9477_acl_priv { + struct ksz9477_acl_entries acles; +}; + +void ksz9477_acl_remove_entries(struct ksz_device *dev, int port, + struct ksz9477_acl_entries *acles, + unsigned long cookie); +int ksz9477_acl_write_list(struct ksz_device *dev, int port); +int ksz9477_sort_acl_entries(struct ksz_device *dev, int port); +void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val); +void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx); +void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port, + u16 ethtype, u8 *src_mac, u8 *dst_mac, + unsigned long cookie, u32 prio); #endif diff --git a/drivers/net/dsa/microchip/ksz9477_acl.c b/drivers/net/dsa/microchip/ksz9477_acl.c new file mode 100644 index 0000000000..7ba778df63 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_acl.c @@ -0,0 +1,1436 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Pengutronix, Oleksij Rempel + +/* Access Control List (ACL) structure: + * + * There are multiple groups of registers involved in ACL configuration: + * + * - Matching Rules: These registers define the criteria for matching incoming + * packets based on their header information (Layer 2 MAC, Layer 3 IP, or + * Layer 4 TCP/UDP). 
Different register settings are used depending on the + * matching rule mode (MD) and the Enable (ENB) settings. + * + * - Action Rules: These registers define how the ACL should modify the packet's + * priority, VLAN tag priority, and forwarding map once a matching rule has + * been triggered. The settings vary depending on whether the matching rule is + * in Count Mode (MD = 01 and ENB = 00) or not. + * + * - Processing Rules: These registers control the overall behavior of the ACL, + * such as selecting which matching rule to apply first, enabling/disabling + * specific rules, or specifying actions for matched packets. + * + * ACL Structure: + * +----------------------+ + * +----------------------+ | (optional) | + * | Matching Rules | | Matching Rules | + * | (Layer 2, 3, 4) | | (Layer 2, 3, 4) | + * +----------------------+ +----------------------+ + * | | + * \___________________________/ + * v + * +----------------------+ + * | Processing Rules | + * | (action idx, | + * | matching rule set) | + * +----------------------+ + * | + * v + * +----------------------+ + * | Action Rules | + * | (Modify Priority, | + * | Forwarding Map, | + * | VLAN tag, etc) | + * +----------------------+ + */ + +#include + +#include "ksz9477.h" +#include "ksz9477_reg.h" +#include "ksz_common.h" + +#define KSZ9477_PORT_ACL_0 0x600 + +enum ksz9477_acl_port_access { + KSZ9477_ACL_PORT_ACCESS_0 = 0x00, + KSZ9477_ACL_PORT_ACCESS_1 = 0x01, + KSZ9477_ACL_PORT_ACCESS_2 = 0x02, + KSZ9477_ACL_PORT_ACCESS_3 = 0x03, + KSZ9477_ACL_PORT_ACCESS_4 = 0x04, + KSZ9477_ACL_PORT_ACCESS_5 = 0x05, + KSZ9477_ACL_PORT_ACCESS_6 = 0x06, + KSZ9477_ACL_PORT_ACCESS_7 = 0x07, + KSZ9477_ACL_PORT_ACCESS_8 = 0x08, + KSZ9477_ACL_PORT_ACCESS_9 = 0x09, + KSZ9477_ACL_PORT_ACCESS_A = 0x0A, + KSZ9477_ACL_PORT_ACCESS_B = 0x0B, + KSZ9477_ACL_PORT_ACCESS_C = 0x0C, + KSZ9477_ACL_PORT_ACCESS_D = 0x0D, + KSZ9477_ACL_PORT_ACCESS_E = 0x0E, + KSZ9477_ACL_PORT_ACCESS_F = 0x0F, + KSZ9477_ACL_PORT_ACCESS_10 = 0x10, + KSZ9477_ACL_PORT_ACCESS_11 = 0x11 +}; + +#define KSZ9477_ACL_MD_MASK GENMASK(5, 4) +#define KSZ9477_ACL_MD_DISABLE 0 +#define KSZ9477_ACL_MD_L2_MAC 1 +#define KSZ9477_ACL_MD_L3_IP 2 +#define KSZ9477_ACL_MD_L4_TCP_UDP 3 + +#define KSZ9477_ACL_ENB_MASK GENMASK(3, 2) +#define KSZ9477_ACL_ENB_L2_COUNTER 0 +#define KSZ9477_ACL_ENB_L2_TYPE 1 +#define KSZ9477_ACL_ENB_L2_MAC 2 +#define KSZ9477_ACL_ENB_L2_MAC_TYPE 3 + +/* only IPv4 src or dst can be used with mask */ +#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_MASK 1 +/* only IPv4 src and dst can be used without mask */ +#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_SRC_DST 2 + +#define KSZ9477_ACL_ENB_L4_IP_PROTO 0 +#define KSZ9477_ACL_ENB_L4_TCP_SRC_DST_PORT 1 +#define KSZ9477_ACL_ENB_L4_UDP_SRC_DST_PORT 2 +#define KSZ9477_ACL_ENB_L4_TCP_SEQ_NUMBER 3 + +#define KSZ9477_ACL_SD_SRC BIT(1) +#define KSZ9477_ACL_SD_DST 0 +#define KSZ9477_ACL_EQ_EQUAL BIT(0) +#define KSZ9477_ACL_EQ_NOT_EQUAL 0 + +#define KSZ9477_ACL_PM_M GENMASK(7, 6) +#define KSZ9477_ACL_PM_DISABLE 0 +#define KSZ9477_ACL_PM_HIGHER 1 +#define KSZ9477_ACL_PM_LOWER 2 +#define KSZ9477_ACL_PM_REPLACE 3 +#define KSZ9477_ACL_P_M GENMASK(5, 3) + +#define KSZ9477_PORT_ACL_CTRL_0 0x0612 + +#define KSZ9477_ACL_WRITE_DONE BIT(6) +#define KSZ9477_ACL_READ_DONE BIT(5) +#define KSZ9477_ACL_WRITE BIT(4) +#define KSZ9477_ACL_INDEX_M GENMASK(3, 0) + +/** + * ksz9477_dump_acl_index - Print the ACL entry at the specified index + * + * @dev: Pointer to the ksz9477 device structure. + * @acle: Pointer to the ACL entry array. + * @index: The index of the ACL entry to print. 
+ * + * This function prints the details of an ACL entry, located at a particular + * index within the ksz9477 device's ACL table. It omits printing entries that + * are empty. + * + * Return: 1 if the entry is non-empty and printed, 0 otherwise. + */ +static int ksz9477_dump_acl_index(struct ksz_device *dev, + struct ksz9477_acl_entry *acle, int index) +{ + bool empty = true; + char buf[64]; + u8 *entry; + int i; + + entry = &acle[index].entry[0]; + for (i = 0; i <= KSZ9477_ACL_PORT_ACCESS_11; i++) { + if (entry[i]) + empty = false; + + sprintf(buf + (i * 3), "%02x ", entry[i]); + } + + /* no need to print empty entries */ + if (empty) + return 0; + + dev_err(dev->dev, " Entry %02d, prio: %02d : %s", index, + acle[index].prio, buf); + + return 1; +} + +/** + * ksz9477_dump_acl - Print ACL entries + * + * @dev: Pointer to the device structure. + * @acle: Pointer to the ACL entry array. + */ +static void ksz9477_dump_acl(struct ksz_device *dev, + struct ksz9477_acl_entry *acle) +{ + int count = 0; + int i; + + for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES; i++) + count += ksz9477_dump_acl_index(dev, acle, i); + + if (count != KSZ9477_ACL_MAX_ENTRIES - 1) + dev_err(dev->dev, " Empty ACL entries were skipped\n"); +} + +/** + * ksz9477_acl_is_valid_matching_rule - Check if an ACL entry contains a valid + * matching rule. + * + * @entry: Pointer to ACL entry buffer + * + * This function checks if the given ACL entry buffer contains a valid + * matching rule by inspecting the Mode (MD) and Enable (ENB) fields. + * + * Returns: True if it's a valid matching rule, false otherwise. + */ +static bool ksz9477_acl_is_valid_matching_rule(u8 *entry) +{ + u8 val1, md, enb; + + val1 = entry[KSZ9477_ACL_PORT_ACCESS_1]; + + md = FIELD_GET(KSZ9477_ACL_MD_MASK, val1); + if (md == KSZ9477_ACL_MD_DISABLE) + return false; + + if (md == KSZ9477_ACL_MD_L2_MAC) { + /* L2 counter is not support, so it is not valid rule for now */ + enb = FIELD_GET(KSZ9477_ACL_ENB_MASK, val1); + if (enb == KSZ9477_ACL_ENB_L2_COUNTER) + return false; + } + + return true; +} + +/** + * ksz9477_acl_get_cont_entr - Get count of contiguous ACL entries and validate + * the matching rules. + * @dev: Pointer to the KSZ9477 device structure. + * @port: Port number. + * @index: Index of the starting ACL entry. + * + * Based on the KSZ9477 switch's Access Control List (ACL) system, the RuleSet + * in an ACL entry indicates which entries contain Matching rules linked to it. + * This RuleSet is represented by two registers: KSZ9477_ACL_PORT_ACCESS_E and + * KSZ9477_ACL_PORT_ACCESS_F. Each bit set in these registers corresponds to + * an entry containing a Matching rule for this RuleSet. + * + * For a single Matching rule linked, only one bit is set. However, when an + * entry links multiple Matching rules, forming what's termed a 'complex rule', + * multiple bits are set in these registers. + * + * This function checks that, for complex rules, the entries containing the + * linked Matching rules are contiguous in terms of their indices. It calculates + * and returns the number of these contiguous entries. 
+ * + * Returns: + * - 0 if the entry is empty and can be safely overwritten + * - 1 if the entry represents a simple rule + * - The number of contiguous entries if it is the root entry of a complex + * rule + * - -ENOTEMPTY if the entry is part of a complex rule but not the root + * entry + * - -EINVAL if the validation fails + */ +static int ksz9477_acl_get_cont_entr(struct ksz_device *dev, int port, + int index) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + int start_idx, end_idx, contiguous_count; + unsigned long val; + u8 vale, valf; + u8 *entry; + int i; + + entry = &acles->entries[index].entry[0]; + vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + val = (vale << 8) | valf; + + /* If no bits are set, return an appropriate value or error */ + if (!val) { + if (ksz9477_acl_is_valid_matching_rule(entry)) { + /* Looks like we are about to corrupt some complex rule. + * Do not print an error here, as this is a normal case + * when we are trying to find a free or starting entry. + */ + dev_dbg(dev->dev, "ACL: entry %d starting with a valid matching rule, but no bits set in RuleSet\n", + index); + return -ENOTEMPTY; + } + + /* This entry does not contain a valid matching rule */ + return 0; + } + + start_idx = find_first_bit((unsigned long *)&val, 16); + end_idx = find_last_bit((unsigned long *)&val, 16); + + /* Calculate the contiguous count */ + contiguous_count = end_idx - start_idx + 1; + + /* Check if the number of bits set in val matches our calculated count */ + if (contiguous_count != hweight16(val)) { + /* Probably we have a fragmented complex rule, which is not + * supported by this driver. + */ + dev_err(dev->dev, "ACL: number of bits set in RuleSet does not match calculated count\n"); + return -EINVAL; + } + + /* loop over the contiguous entries and check for valid matching rules */ + for (i = start_idx; i <= end_idx; i++) { + u8 *current_entry = &acles->entries[i].entry[0]; + + if (!ksz9477_acl_is_valid_matching_rule(current_entry)) { + /* we have something linked without a valid matching + * rule. ACL table? + */ + dev_err(dev->dev, "ACL: entry %d does not contain a valid matching rule\n", + i); + return -EINVAL; + } + + if (i > start_idx) { + vale = current_entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = current_entry[KSZ9477_ACL_PORT_ACCESS_F]; + /* Following entry should have empty linkage list */ + if (vale || valf) { + dev_err(dev->dev, "ACL: entry %d has non-empty RuleSet linkage\n", + i); + return -EINVAL; + } + } + } + + return contiguous_count; +} + +/** + * ksz9477_acl_update_linkage - Update the RuleSet linkage for an ACL entry + * after a move operation. + * + * @dev: Pointer to the ksz_device. + * @entry: Pointer to the ACL entry array. + * @old_idx: The original index of the ACL entry before moving. + * @new_idx: The new index of the ACL entry after moving. + * + * This function updates the RuleSet linkage bits for an ACL entry when + * it's moved from one position to another in the ACL table. The RuleSet + * linkage is represented by two 8-bit registers, which are combined + * into a 16-bit value for easier manipulation. The linkage bits are shifted + * based on the difference between the old and new index. If any bits are lost + * during the shift operation, an error is returned. + * + * Note: Fragmentation within a RuleSet is not supported. Hence, entries must + * be moved as complete blocks, maintaining the integrity of the RuleSet. 
+ * + * Returns: 0 on success, or -EINVAL if any RuleSet linkage bits are lost + * during the move. + */ +static int ksz9477_acl_update_linkage(struct ksz_device *dev, u8 *entry, + u16 old_idx, u16 new_idx) +{ + unsigned int original_bit_count; + unsigned long rule_linkage; + u8 vale, valf, val0; + int shift; + + val0 = entry[KSZ9477_ACL_PORT_ACCESS_0]; + vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + /* Combine the two u8 values into one u16 for easier manipulation */ + rule_linkage = (vale << 8) | valf; + original_bit_count = hweight16(rule_linkage); + + /* Even if HW is able to handle fragmented RuleSet, we don't support it. + * RuleSet is filled only for the first entry of the set. + */ + if (!rule_linkage) + return 0; + + if (val0 != old_idx) { + dev_err(dev->dev, "ACL: entry %d has unexpected ActionRule linkage: %d\n", + old_idx, val0); + return -EINVAL; + } + + val0 = new_idx; + + /* Calculate the number of positions to shift */ + shift = new_idx - old_idx; + + /* Shift the RuleSet */ + if (shift > 0) + rule_linkage <<= shift; + else + rule_linkage >>= -shift; + + /* Check that no bits were lost in the process */ + if (original_bit_count != hweight16(rule_linkage)) { + dev_err(dev->dev, "ACL RuleSet linkage bits lost during move\n"); + return -EINVAL; + } + + entry[KSZ9477_ACL_PORT_ACCESS_0] = val0; + + /* Update the RuleSet bitfields in the entry */ + entry[KSZ9477_ACL_PORT_ACCESS_E] = (rule_linkage >> 8) & 0xFF; + entry[KSZ9477_ACL_PORT_ACCESS_F] = rule_linkage & 0xFF; + + return 0; +} + +/** + * ksz9477_validate_and_get_src_count - Validate source and destination indices + * and determine the source entry count. + * @dev: Pointer to the KSZ device structure. + * @port: Port number on the KSZ device where the ACL entries reside. + * @src_idx: Index of the starting ACL entry that needs to be validated. + * @dst_idx: Index of the destination where the source entries are intended to + * be moved. + * @src_count: Pointer to the variable that will hold the number of contiguous + * source entries if the validation passes. + * @dst_count: Pointer to the variable that will hold the number of contiguous + * destination entries if the validation passes. + * + * This function performs validation on the source and destination indices + * provided for ACL entries. It checks if the indices are within the valid + * range, and if the source entries are contiguous. Additionally, the function + * ensures that there's adequate space at the destination for the source entries + * and that the destination index isn't in the middle of a RuleSet. If all + * validations pass, the function returns the number of contiguous source and + * destination entries. + * + * Return: 0 on success, otherwise returns a negative error code if any + * validation check fails. + */ +static int ksz9477_validate_and_get_src_count(struct ksz_device *dev, int port, + int src_idx, int dst_idx, + int *src_count, int *dst_count) +{ + int ret; + + if (src_idx >= KSZ9477_ACL_MAX_ENTRIES || + dst_idx >= KSZ9477_ACL_MAX_ENTRIES) { + dev_err(dev->dev, "ACL: invalid entry index\n"); + return -EINVAL; + } + + /* Validate if the source entries are contiguous */ + ret = ksz9477_acl_get_cont_entr(dev, port, src_idx); + if (ret < 0) + return ret; + *src_count = ret; + + if (!*src_count) { + dev_err(dev->dev, "ACL: source entry is empty\n"); + return -EINVAL; + } + + if (dst_idx + *src_count >= KSZ9477_ACL_MAX_ENTRIES) { + dev_err(dev->dev, "ACL: Not enough space at the destination. 
Move operation will fail.\n"); + return -EINVAL; + } + + /* Validate if the destination entry is empty or not in the middle of + * a RuleSet. + */ + ret = ksz9477_acl_get_cont_entr(dev, port, dst_idx); + if (ret < 0) + return ret; + *dst_count = ret; + + return 0; +} + +/** + * ksz9477_move_entries_downwards - Move a range of ACL entries downwards in + * the list. + * @dev: Pointer to the KSZ device structure. + * @acles: Pointer to the structure encapsulating all the ACL entries. + * @start_idx: Starting index of the entries to be relocated. + * @num_entries_to_move: Number of consecutive entries to be relocated. + * @end_idx: Destination index where the first entry should be situated post + * relocation. + * + * This function is responsible for rearranging a specific block of ACL entries + * by shifting them downwards in the list based on the supplied source and + * destination indices. It ensures that the linkage between the ACL entries is + * maintained accurately after the relocation. + * + * Return: 0 on successful relocation of entries, otherwise returns a negative + * error code. + */ +static int ksz9477_move_entries_downwards(struct ksz_device *dev, + struct ksz9477_acl_entries *acles, + u16 start_idx, + u16 num_entries_to_move, + u16 end_idx) +{ + struct ksz9477_acl_entry *e; + int ret, i; + + for (i = start_idx; i < end_idx; i++) { + e = &acles->entries[i]; + *e = acles->entries[i + num_entries_to_move]; + + ret = ksz9477_acl_update_linkage(dev, &e->entry[0], + i + num_entries_to_move, i); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * ksz9477_move_entries_upwards - Move a range of ACL entries upwards in the + * list. + * @dev: Pointer to the KSZ device structure. + * @acles: Pointer to the structure holding all the ACL entries. + * @start_idx: The starting index of the entries to be moved. + * @num_entries_to_move: Number of contiguous entries to be moved. + * @target_idx: The destination index where the first entry should be placed + * after moving. + * + * This function rearranges a chunk of ACL entries by moving them upwards + * in the list based on the given source and destination indices. The reordering + * process preserves the linkage between entries by updating it accordingly. + * + * Return: 0 if the entries were successfully moved, otherwise a negative error + * code. + */ +static int ksz9477_move_entries_upwards(struct ksz_device *dev, + struct ksz9477_acl_entries *acles, + u16 start_idx, u16 num_entries_to_move, + u16 target_idx) +{ + struct ksz9477_acl_entry *e; + int ret, i, b; + + for (i = start_idx; i > target_idx; i--) { + b = i + num_entries_to_move - 1; + + e = &acles->entries[b]; + *e = acles->entries[i - 1]; + + ret = ksz9477_acl_update_linkage(dev, &e->entry[0], i - 1, b); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * ksz9477_acl_move_entries - Move a block of contiguous ACL entries from a + * source to a destination index. + * @dev: Pointer to the KSZ9477 device structure. + * @port: Port number. + * @src_idx: Index of the starting source ACL entry. + * @dst_idx: Index of the starting destination ACL entry. + * + * This function aims to move a block of contiguous ACL entries from the source + * index to the destination index while ensuring the integrity and validity of + * the ACL table. + * + * In case of any errors during the adjustments or copying, the function will + * restore the ACL entries to their original state from the backup. + * + * Return: 0 if the move operation is successful. 
Returns -EINVAL for validation + * errors or other error codes based on specific failure conditions. + */ +static int ksz9477_acl_move_entries(struct ksz_device *dev, int port, + u16 src_idx, u16 dst_idx) +{ + struct ksz9477_acl_entry buffer[KSZ9477_ACL_MAX_ENTRIES]; + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + int src_count, ret, dst_count; + + /* Nothing to do */ + if (src_idx == dst_idx) + return 0; + + ret = ksz9477_validate_and_get_src_count(dev, port, src_idx, dst_idx, + &src_count, &dst_count); + if (ret) + return ret; + + /* In case dst_index is greater than src_index, we need to adjust the + * destination index to account for the entries that will be moved + * downwards and the size of the entry located at dst_idx. + */ + if (dst_idx > src_idx) + dst_idx = dst_idx + dst_count - src_count; + + /* Copy source block to buffer and update its linkage */ + for (int i = 0; i < src_count; i++) { + buffer[i] = acles->entries[src_idx + i]; + ret = ksz9477_acl_update_linkage(dev, &buffer[i].entry[0], + src_idx + i, dst_idx + i); + if (ret < 0) + return ret; + } + + /* Adjust other entries and their linkage based on destination */ + if (dst_idx > src_idx) { + ret = ksz9477_move_entries_downwards(dev, acles, src_idx, + src_count, dst_idx); + } else { + ret = ksz9477_move_entries_upwards(dev, acles, src_idx, + src_count, dst_idx); + } + if (ret < 0) + return ret; + + /* Copy buffer to destination block */ + for (int i = 0; i < src_count; i++) + acles->entries[dst_idx + i] = buffer[i]; + + return 0; +} + +/** + * ksz9477_get_next_block_start - Identify the starting index of the next ACL + * block. + * @dev: Pointer to the device structure. + * @port: The port number on which the ACL entries are being checked. + * @start: The starting index from which the search begins. + * + * This function looks for the next valid ACL block starting from the provided + * 'start' index and returns the beginning index of that block. If the block is + * invalid or if it reaches the end of the ACL entries without finding another + * block, it returns the maximum ACL entries count. + * + * Returns: + * - The starting index of the next valid ACL block. + * - KSZ9477_ACL_MAX_ENTRIES if no other valid blocks are found after 'start'. + * - A negative error code if an error occurs while checking. + */ +static int ksz9477_get_next_block_start(struct ksz_device *dev, int port, + int start) +{ + int block_size; + + for (int i = start; i < KSZ9477_ACL_MAX_ENTRIES;) { + block_size = ksz9477_acl_get_cont_entr(dev, port, i); + if (block_size < 0 && block_size != -ENOTEMPTY) + return block_size; + + if (block_size > 0) + return i; + + i++; + } + return KSZ9477_ACL_MAX_ENTRIES; +} + +/** + * ksz9477_swap_acl_blocks - Swap two ACL blocks + * @dev: Pointer to the device structure. + * @port: The port number on which the ACL blocks are to be swapped. + * @i: The starting index of the first ACL block. + * @j: The starting index of the second ACL block. + * + * This function is used to swap two ACL blocks present at given indices. The + * main purpose is to aid in the sorting and reordering of ACL blocks based on + * certain criteria, e.g., priority. It checks the validity of the block at + * index 'i', ensuring it's not an empty block, and then proceeds to swap it + * with the block at index 'j'. + * + * Returns: + * - 0 on successful swapping of blocks. + * - -EINVAL if the block at index 'i' is empty. 
+ * - A negative error code if any other error occurs during the swap. + */ +static int ksz9477_swap_acl_blocks(struct ksz_device *dev, int port, int i, + int j) +{ + int ret, current_block_size; + + current_block_size = ksz9477_acl_get_cont_entr(dev, port, i); + if (current_block_size < 0) + return current_block_size; + + if (!current_block_size) { + dev_err(dev->dev, "ACL: swapping empty entry %d\n", i); + return -EINVAL; + } + + ret = ksz9477_acl_move_entries(dev, port, i, j); + if (ret) + return ret; + + ret = ksz9477_acl_move_entries(dev, port, j - current_block_size, i); + if (ret) + return ret; + + return 0; +} + +/** + * ksz9477_sort_acl_entr_no_back - Sort ACL entries for a given port based on + * priority without backing up entries. + * @dev: Pointer to the device structure. + * @port: The port number whose ACL entries need to be sorted. + * + * This function sorts ACL entries of the specified port using a variant of the + * bubble sort algorithm. It operates on blocks of ACL entries rather than + * individual entries. Each block's starting point is identified and then + * compared with subsequent blocks based on their priority. If the current + * block has a lower priority than the subsequent block, the two blocks are + * swapped. + * + * This is done in order to maintain an organized order of ACL entries based on + * priority, ensuring efficient and predictable ACL rule application. + * + * Returns: + * - 0 on successful sorting of entries. + * - A negative error code if any issue arises during sorting, e.g., + * if the function is unable to get the next block start. + */ +static int ksz9477_sort_acl_entr_no_back(struct ksz_device *dev, int port) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + struct ksz9477_acl_entry *curr, *next; + int i, j, ret; + + /* Bubble sort */ + for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES;) { + curr = &acles->entries[i]; + + j = ksz9477_get_next_block_start(dev, port, i + 1); + if (j < 0) + return j; + + while (j < KSZ9477_ACL_MAX_ENTRIES) { + next = &acles->entries[j]; + + if (curr->prio > next->prio) { + ret = ksz9477_swap_acl_blocks(dev, port, i, j); + if (ret) + return ret; + } + + j = ksz9477_get_next_block_start(dev, port, j + 1); + if (j < 0) + return j; + } + + i = ksz9477_get_next_block_start(dev, port, i + 1); + if (i < 0) + return i; + } + + return 0; +} + +/** + * ksz9477_sort_acl_entries - Sort the ACL entries for a given port. + * @dev: Pointer to the KSZ device. + * @port: Port number. + * + * This function sorts the Access Control List (ACL) entries for a specified + * port. Before sorting, a backup of the original entries is created. If the + * sorting process fails, the function will log error messages displaying both + * the original and attempted sorted entries, and then restore the original + * entries from the backup. + * + * Return: 0 if the sorting succeeds, otherwise a negative error code. + */ +int ksz9477_sort_acl_entries(struct ksz_device *dev, int port) +{ + struct ksz9477_acl_entry backup[KSZ9477_ACL_MAX_ENTRIES]; + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + int ret; + + /* create a backup of the ACL entries, if something goes wrong + * we can restore the ACL entries. 
+ */ + memcpy(backup, acles->entries, sizeof(backup)); + + ret = ksz9477_sort_acl_entr_no_back(dev, port); + if (ret) { + dev_err(dev->dev, "ACL: failed to sort entries for port %d\n", + port); + dev_err(dev->dev, "ACL dump before sorting:\n"); + ksz9477_dump_acl(dev, backup); + dev_err(dev->dev, "ACL dump after sorting:\n"); + ksz9477_dump_acl(dev, acles->entries); + /* Restore the original entries */ + memcpy(acles->entries, backup, sizeof(backup)); + } + + return ret; +} + +/** + * ksz9477_acl_wait_ready - Waits for the ACL operation to complete on a given + * port. + * @dev: The ksz_device instance. + * @port: The port number to wait for. + * + * This function checks if the ACL write or read operation is completed by + * polling the specified register. + * + * Returns: 0 if the operation is successful, or a negative error code if an + * error occurs. + */ +static int ksz9477_acl_wait_ready(struct ksz_device *dev, int port) +{ + unsigned int wr_mask = KSZ9477_ACL_WRITE_DONE | KSZ9477_ACL_READ_DONE; + unsigned int val, reg; + int ret; + + reg = dev->dev_ops->get_port_addr(port, KSZ9477_PORT_ACL_CTRL_0); + + ret = regmap_read_poll_timeout(dev->regmap[0], reg, val, + (val & wr_mask) == wr_mask, 1000, 10000); + if (ret) + dev_err(dev->dev, "Failed to read/write ACL table\n"); + + return ret; +} + +/** + * ksz9477_acl_entry_write - Writes an ACL entry to a given port at the + * specified index. + * @dev: The ksz_device instance. + * @port: The port number to write the ACL entry to. + * @entry: A pointer to the ACL entry data. + * @idx: The index at which to write the ACL entry. + * + * This function writes the provided ACL entry to the specified port at the + * given index. + * + * Returns: 0 if the operation is successful, or a negative error code if an + * error occurs. + */ +static int ksz9477_acl_entry_write(struct ksz_device *dev, int port, u8 *entry, + int idx) +{ + int ret, i; + u8 val; + + for (i = 0; i < KSZ9477_ACL_ENTRY_SIZE; i++) { + ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_0 + i, entry[i]); + if (ret) { + dev_err(dev->dev, "Failed to write ACL entry %d\n", i); + return ret; + } + } + + /* write everything down */ + val = FIELD_PREP(KSZ9477_ACL_INDEX_M, idx) | KSZ9477_ACL_WRITE; + ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_CTRL_0, val); + if (ret) + return ret; + + /* wait until everything is written */ + return ksz9477_acl_wait_ready(dev, port); +} + +/** + * ksz9477_acl_port_enable - Enables ACL functionality on a given port. + * @dev: The ksz_device instance. + * @port: The port number on which to enable ACL functionality. + * + * This function enables ACL functionality on the specified port by configuring + * the appropriate control registers. It returns 0 if the operation is + * successful, or a negative error code if an error occurs. + * + * 0xn801 - KSZ9477S 5.2.8.2 Port Priority Control Register + * Bit 7 - Highest Priority + * Bit 6 - OR'ed Priority + * Bit 4 - MAC Address Priority Classification + * Bit 3 - VLAN Priority Classification + * Bit 2 - 802.1p Priority Classification + * Bit 1 - Diffserv Priority Classification + * Bit 0 - ACL Priority Classification + * + * Current driver implementation sets 802.1p priority classification by default. + * In this function we add ACL priority classification with OR'ed priority. + * According to testing, priority set by ACL will supersede the 802.1p priority. 
+ *
+ * 0xn803 - KSZ9477S 5.2.8.4 Port Authentication Control Register
+ * Bit 2 - Access Control List (ACL) Enable
+ * Bits 1:0 - Authentication Mode
+ * 00 = Reserved
+ * 01 = Block Mode. Authentication is enabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * blocked; otherwise ACL actions apply.
+ * 10 = Pass Mode. Authentication is disabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * forwarded; otherwise ACL actions apply.
+ * 11 = Trap Mode. Authentication is enabled. All traffic is
+ * forwarded to the host port. When ACL is enabled, all
+ * traffic that misses the ACL rules is blocked; otherwise
+ * ACL actions apply.
+ *
+ * We are using Pass Mode in this function.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_enable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, 0, PORT_ACL_PRIO_ENABLE |
+ PORT_OR_PRIO);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL,
+ PORT_ACL_ENABLE |
+ FIELD_PREP(PORT_AUTHEN_MODE, PORT_AUTHEN_PASS));
+}
+
+/**
+ * ksz9477_acl_port_disable - Disables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to disable ACL functionality.
+ *
+ * This function disables ACL functionality on the specified port by writing a
+ * value of 0 to the REG_PORT_MRI_AUTHEN_CTRL control register and removing
+ * the PORT_ACL_PRIO_ENABLE bit from the P_PRIO_CTRL register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_disable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, PORT_ACL_PRIO_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL, 0);
+}
+
+/**
+ * ksz9477_acl_write_list - Write a list of ACL entries to a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to write ACL entries.
+ *
+ * This function enables ACL functionality on the specified port, writes a list
+ * of ACL entries to the port, and disables ACL functionality if there are no
+ * entries.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+int ksz9477_acl_write_list(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int ret, i;
+
+ /* ACL should be enabled before writing entries */
+ ret = ksz9477_acl_port_enable(dev, port);
+ if (ret)
+ return ret;
+
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Check if entry was removed and should be zeroed.
+ * If last fields of the entry are not zero, it means
+ * it is removed locally but currently not synced with the HW.
+ * So, we will write it down to the HW to remove it.
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] == 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] == 0)
+ continue;
+
+ ret = ksz9477_acl_entry_write(dev, port, entry, i);
+ if (ret)
+ return ret;
+
+ /* now removed entry is clean on HW side, so it can
+ * be zeroed in the cache too
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] != 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] != 0) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+ }
+ }
+
+ if (!acles->entries_count)
+ return ksz9477_acl_port_disable(dev, port);
+
+ return 0;
+}
+
+/**
+ * ksz9477_acl_remove_entries - Remove ACL entries with a given cookie from a
+ * specified ksz9477_acl_entries structure.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to remove ACL entries.
+ * @acles: The ksz9477_acl_entries instance.
+ * @cookie: The cookie value to match for entry removal.
+ *
+ * This function iterates through the entries array, removing any entries with
+ * a matching cookie value. The remaining entries are then shifted down to fill
+ * the gap.
+ */
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+ struct ksz9477_acl_entries *acles,
+ unsigned long cookie)
+{
+ int entries_count = acles->entries_count;
+ int ret, i, src_count;
+ int src_idx = -1;
+
+ if (!entries_count)
+ return;
+
+ /* Search for the first position with the cookie */
+ for (i = 0; i < entries_count; i++) {
+ if (acles->entries[i].cookie == cookie) {
+ src_idx = i;
+ break;
+ }
+ }
+
+ /* No entries with the matching cookie found */
+ if (src_idx == -1)
+ return;
+
+ /* Get the size of the cookie entry. We may have complex entries. */
+ src_count = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+ if (src_count <= 0)
+ return;
+
+ /* Move all entries down to overwrite the removed entry with the cookie */
+ ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+ src_count,
+ entries_count - src_count);
+ if (ret) {
+ dev_err(dev->dev, "Failed to move ACL entries down\n");
+ return;
+ }
+
+ /* Overwrite new empty places at the end of the list with zeros to make
+ * sure no unexpected things will happen and no unexplored quirks will
+ * come out.
+ */
+ for (i = entries_count - src_count; i < entries_count; i++) {
+ struct ksz9477_acl_entry *entry = &acles->entries[i];
+
+ memset(entry, 0, sizeof(*entry));
+
+ /* Set all access bits to be able to write zeroed entry to HW */
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ /* Adjust the total entries count */
+ acles->entries_count -= src_count;
+}
+
+/**
+ * ksz9477_port_acl_init - Initialize the ACL for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to initialize the ACL for.
+ *
+ * This function allocates memory for an acl structure, associates it with the
+ * specified port, and initializes the ACL entries to a default state. The
+ * entries are then written using the ksz9477_acl_write_list function, ensuring
+ * the ACL has a predictable initial hardware state.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+int ksz9477_port_acl_init(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_entries *acles;
+ struct ksz9477_acl_priv *acl;
+ int ret, i;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ dev->ports[port].acl_priv = acl;
+
+ acles = &acl->acles;
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Set all access bits to be able to write zeroed
+ * entry
+ */
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ ret = ksz9477_acl_write_list(dev, port);
+ if (ret)
+ goto free_acl;
+
+ return 0;
+
+free_acl:
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+
+ return ret;
+}
+
+/**
+ * ksz9477_port_acl_free - Free the ACL resources for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to free the ACL for.
+ *
+ * This disables the ACL for the specified port and frees the associated memory.
+ */
+void ksz9477_port_acl_free(struct ksz_device *dev, int port)
+{
+ if (!dev->ports[port].acl_priv)
+ return;
+
+ ksz9477_acl_port_disable(dev, port);
+
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+}
+
+/**
+ * ksz9477_acl_set_reg - Set entry[16] and entry[17] depending on the updated
+ * entry[]
+ * @entry: An array containing the entries
+ * @reg: The register of the entry that needs to be updated
+ * @value: The value to be assigned to the updated entry
+ *
+ * This function updates the entry[] array based on the provided register and
+ * value. It also sets entry[0x10] and entry[0x11] according to the ACL byte
+ * enable rules.
+ *
+ * 0x10 - Byte Enable [15:8]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access 7 Register
+ * Bit 1 applies to the Port ACL Access 6 Register, etc.
+ * Bit 7 applies to the Port ACL Access 0 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
+ *
+ * 0x11 - Byte Enable [7:0]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access F Register
+ * Bit 1 applies to the Port ACL Access E Register, etc.
+ * Bit 7 applies to the Port ACL Access 8 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
+ */
+static void ksz9477_acl_set_reg(u8 *entry, enum ksz9477_acl_port_access reg,
+ u8 value)
+{
+ if (reg >= KSZ9477_ACL_PORT_ACCESS_0 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_7) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_7 - reg);
+ } else if (reg >= KSZ9477_ACL_PORT_ACCESS_8 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_F) {
+ entry[KSZ9477_ACL_PORT_ACCESS_11] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_F - reg);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ entry[reg] = value;
+}
+
+/**
+ * ksz9477_acl_matching_rule_cfg_l2 - Configure an ACL filtering entry to match
+ * L2 types of Ethernet frames
+ * @entry: Pointer to ACL entry buffer
+ * @ethertype: Ethertype value
+ * @eth_addr: Pointer to Ethernet address
+ * @is_src: If true, match the source MAC address; if false, match the
+ * destination MAC address
+ *
+ * This function configures an Access Control List (ACL) filtering
+ * entry to match Layer 2 types of Ethernet frames based on the provided
+ * ethertype and Ethernet address.
Additionally, it can match either the source + * or destination MAC address depending on the value of the is_src parameter. + * + * Register Descriptions for MD = 01 and ENB != 00 (Layer 2 MAC header + * filtering) + * + * 0x01 - Mode and Enable + * Bits 5:4 - MD (Mode) + * 01 = Layer 2 MAC header or counter filtering + * Bits 3:2 - ENB (Enable) + * 01 = Comparison is performed only on the TYPE value + * 10 = Comparison is performed only on the MAC Address value + * 11 = Both the MAC Address and TYPE are tested + * Bit 1 - S/D (Source / Destination) + * 0 = Destination address + * 1 = Source address + * Bit 0 - EQ (Equal / Not Equal) + * 0 = Not Equal produces true result + * 1 = Equal produces true result + * + * 0x02-0x07 - MAC Address + * 0x02 - MAC Address [47:40] + * 0x03 - MAC Address [39:32] + * 0x04 - MAC Address [31:24] + * 0x05 - MAC Address [23:16] + * 0x06 - MAC Address [15:8] + * 0x07 - MAC Address [7:0] + * + * 0x08-0x09 - EtherType + * 0x08 - EtherType [15:8] + * 0x09 - EtherType [7:0] + */ +static void ksz9477_acl_matching_rule_cfg_l2(u8 *entry, u16 ethertype, + u8 *eth_addr, bool is_src) +{ + u8 enb = 0; + u8 val; + + if (ethertype) + enb |= KSZ9477_ACL_ENB_L2_TYPE; + if (eth_addr) + enb |= KSZ9477_ACL_ENB_L2_MAC; + + val = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) | + FIELD_PREP(KSZ9477_ACL_ENB_MASK, enb) | + FIELD_PREP(KSZ9477_ACL_SD_SRC, is_src) | KSZ9477_ACL_EQ_EQUAL; + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_1, val); + + if (eth_addr) { + int i; + + for (i = 0; i < ETH_ALEN; i++) { + ksz9477_acl_set_reg(entry, + KSZ9477_ACL_PORT_ACCESS_2 + i, + eth_addr[i]); + } + } + + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_8, ethertype >> 8); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_9, ethertype & 0xff); +} + +/** + * ksz9477_acl_action_rule_cfg - Set action for an ACL entry + * @entry: Pointer to the ACL entry + * @force_prio: If true, force the priority value + * @prio_val: Priority value + * + * This function sets the action for the specified ACL entry. It prepares + * the priority mode and traffic class values and updates the entry's + * action registers accordingly. Currently, there is no port or VLAN PCP + * remapping. + * + * ACL Action Rule Parameters for Non-Count Modes (MD ≠ 01 or ENB ≠ 00) + * + * 0x0A - PM, P, RPE, RP[2:1] + * Bits 7:6 - PM[1:0] - Priority Mode + * 00 = ACL does not specify the packet priority. Priority is + * determined by standard QoS functions. + * 01 = Change packet priority to P[2:0] if it is greater than QoS + * result. + * 10 = Change packet priority to P[2:0] if it is smaller than the + * QoS result. + * 11 = Always change packet priority to P[2:0]. + * Bits 5:3 - P[2:0] - Priority value + * Bit 2 - RPE - Remark Priority Enable + * Bits 1:0 - RP[2:1] - Remarked Priority value (bits 2:1) + * 0 = Disable priority remarking + * 1 = Enable priority remarking. VLAN tag priority (PCP) bits are + * replaced by RP[2:0]. + * + * 0x0B - RP[0], MM + * Bit 7 - RP[0] - Remarked Priority value (bit 0) + * Bits 6:5 - MM[1:0] - Map Mode + * 00 = No forwarding remapping + * 01 = The forwarding map in FORWARD is OR'ed with the forwarding + * map from the Address Lookup Table. + * 10 = The forwarding map in FORWARD is AND'ed with the forwarding + * map from the Address Lookup Table. + * 11 = The forwarding map in FORWARD replaces the forwarding map + * from the Address Lookup Table. + * 0x0D - FORWARD[n:0] + * Bits 7:0 - FORWARD[n:0] - Forwarding map. Bit 0 = port 1, + * bit 1 = port 2, etc. 
+ * 1 = enable forwarding to this port + * 0 = do not forward to this port + */ +void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val) +{ + u8 prio_mode, val; + + if (force_prio) + prio_mode = KSZ9477_ACL_PM_REPLACE; + else + prio_mode = KSZ9477_ACL_PM_DISABLE; + + val = FIELD_PREP(KSZ9477_ACL_PM_M, prio_mode) | + FIELD_PREP(KSZ9477_ACL_P_M, prio_val); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_A, val); + + /* no port or VLAN PCP remapping for now */ + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_B, 0); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_D, 0); +} + +/** + * ksz9477_acl_processing_rule_set_action - Set the action for the processing + * rule set. + * @entry: Pointer to the ACL entry + * @action_idx: Index of the action to be applied + * + * This function sets the action for the processing rule set by updating the + * appropriate register in the entry. There can be only one action per + * processing rule. + * + * Access Control List (ACL) Processing Rule Registers: + * + * 0x00 - First Rule Number (FRN) + * Bits 3:0 - First Rule Number. Pointer to an Action rule entry. + */ +void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx) +{ + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_0, action_idx); +} + +/** + * ksz9477_acl_processing_rule_add_match - Add a matching rule to the rule set + * @entry: Pointer to the ACL entry + * @match_idx: Index of the matching rule to be added + * + * This function adds a matching rule to the rule set by updating the + * appropriate bits in the entry's rule set registers. + * + * Access Control List (ACL) Processing Rule Registers: + * + * 0x0E - RuleSet [15:8] + * Bits 7:0 - RuleSet [15:8] Specifies a set of one or more Matching rule + * entries. RuleSet has one bit for each of the 16 Matching rule entries. + * If multiple Matching rules are selected, then all conditions will be + * AND'ed to produce a final match result. + * 0 = Matching rule not selected + * 1 = Matching rule selected + * + * 0x0F - RuleSet [7:0] + * Bits 7:0 - RuleSet [7:0] + */ +static void ksz9477_acl_processing_rule_add_match(u8 *entry, u8 match_idx) +{ + u8 vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + u8 valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + if (match_idx < 8) + valf |= BIT(match_idx); + else + vale |= BIT(match_idx - 8); + + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_E, vale); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_F, valf); +} + +/** + * ksz9477_acl_get_init_entry - Get a new uninitialized entry for a specified + * port on a ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to get the uninitialized entry for. + * @cookie: The cookie to associate with the entry. + * @prio: The priority to associate with the entry. + * + * This function retrieves the next available ACL entry for the specified port, + * clears all access flags, and associates it with the current cookie. + * + * Returns: A pointer to the new uninitialized ACL entry. 
+ */ +static struct ksz9477_acl_entry * +ksz9477_acl_get_init_entry(struct ksz_device *dev, int port, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + struct ksz9477_acl_entry *entry; + + entry = &acles->entries[acles->entries_count]; + entry->cookie = cookie; + entry->prio = prio; + + /* clear all access flags */ + entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0; + entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0; + + return entry; +} + +/** + * ksz9477_acl_match_process_l2 - Configure Layer 2 ACL matching rules and + * processing rules. + * @dev: Pointer to the ksz_device. + * @port: Port number. + * @ethtype: Ethernet type. + * @src_mac: Source MAC address. + * @dst_mac: Destination MAC address. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function sets up matching and processing rules for Layer 2 ACLs. + * It takes into account that only one MAC per entry is supported. + */ +void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port, + u16 ethtype, u8 *src_mac, u8 *dst_mac, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + struct ksz9477_acl_entry *entry; + + entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio); + + /* ACL supports only one MAC per entry */ + if (src_mac && dst_mac) { + ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, src_mac, + true); + + /* Add both match entries to first processing rule */ + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + acles->entries_count++; + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + + entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio); + ksz9477_acl_matching_rule_cfg_l2(entry->entry, 0, dst_mac, + false); + acles->entries_count++; + } else { + u8 *mac = src_mac ? src_mac : dst_mac; + bool is_src = src_mac ? 
true : false; + + ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, mac, + is_src); + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + acles->entries_count++; + } +} diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 2710afad4f..cac4a607e5 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -66,10 +66,7 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c) if (!dev) return; - if (dev->dev_ops->reset) - dev->dev_ops->reset(dev); - - dsa_switch_shutdown(dev->ds); + ksz_switch_shutdown(dev); i2c_set_clientdata(i2c, NULL); } diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index cba3dba58b..f3a205ee48 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -112,19 +112,6 @@ #define REG_SW_IBA_SYNC__1 0x010C -#define REG_SW_IO_STRENGTH__1 0x010D -#define SW_DRIVE_STRENGTH_M 0x7 -#define SW_DRIVE_STRENGTH_2MA 0 -#define SW_DRIVE_STRENGTH_4MA 1 -#define SW_DRIVE_STRENGTH_8MA 2 -#define SW_DRIVE_STRENGTH_12MA 3 -#define SW_DRIVE_STRENGTH_16MA 4 -#define SW_DRIVE_STRENGTH_20MA 5 -#define SW_DRIVE_STRENGTH_24MA 6 -#define SW_DRIVE_STRENGTH_28MA 7 -#define SW_HI_SPEED_DRIVE_STRENGTH_S 4 -#define SW_LO_SPEED_DRIVE_STRENGTH_S 0 - #define REG_SW_IBA_STATUS__4 0x0110 #define SW_IBA_REQ BIT(31) @@ -166,13 +153,6 @@ #define SW_DOUBLE_TAG BIT(7) #define SW_RESET BIT(1) -#define REG_SW_MAC_ADDR_0 0x0302 -#define REG_SW_MAC_ADDR_1 0x0303 -#define REG_SW_MAC_ADDR_2 0x0304 -#define REG_SW_MAC_ADDR_3 0x0305 -#define REG_SW_MAC_ADDR_4 0x0306 -#define REG_SW_MAC_ADDR_5 0x0307 - #define REG_SW_MTU__2 0x0308 #define REG_SW_MTU_MASK GENMASK(13, 0) diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c new file mode 100644 index 0000000000..8b2f5be667 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Pengutronix, Oleksij Rempel + +#include "ksz9477.h" +#include "ksz9477_reg.h" +#include "ksz_common.h" + +#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) +#define KSZ9477_MAX_TC 7 + +/** + * ksz9477_flower_parse_key_l2 - Parse Layer 2 key from flow rule and configure + * ACL entries accordingly. + * @dev: Pointer to the ksz_device. + * @port: Port number. + * @extack: Pointer to the netlink_ext_ack. + * @rule: Pointer to the flow_rule. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function parses the Layer 2 key from the flow rule and configures + * the corresponding ACL entries. It checks for unsupported offloads and + * available entries before proceeding with the configuration. + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +static int ksz9477_flower_parse_key_l2(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_rule *rule, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct flow_match_eth_addrs ematch; + struct ksz9477_acl_entries *acles; + int required_entries; + u8 *src_mac = NULL; + u8 *dst_mac = NULL; + u16 ethtype = 0; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + if (match.key->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "ethernet type mask must be a full mask"); + return -EINVAL; + } + + ethtype = be16_to_cpu(match.key->n_proto); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + flow_rule_match_eth_addrs(rule, &ematch); + + if (!is_zero_ether_addr(ematch.key->src)) { + if (!is_broadcast_ether_addr(ematch.mask->src)) + goto not_full_mask_err; + + src_mac = ematch.key->src; + } + + if (!is_zero_ether_addr(ematch.key->dst)) { + if (!is_broadcast_ether_addr(ematch.mask->dst)) + goto not_full_mask_err; + + dst_mac = ematch.key->dst; + } + } + + acles = &acl->acles; + /* ACL supports only one MAC per entry */ + required_entries = src_mac && dst_mac ? 2 : 1; + + /* Check if there are enough available entries */ + if (acles->entries_count + required_entries > KSZ9477_ACL_MAX_ENTRIES) { + NL_SET_ERR_MSG_MOD(extack, "ACL entry limit reached"); + return -EOPNOTSUPP; + } + + ksz9477_acl_match_process_l2(dev, port, ethtype, src_mac, dst_mac, + cookie, prio); + + return 0; + +not_full_mask_err: + NL_SET_ERR_MSG_MOD(extack, "MAC address mask must be a full mask"); + return -EOPNOTSUPP; +} + +/** + * ksz9477_flower_parse_key - Parse flow rule keys for a specified port on a + * ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to parse the flow rule keys for. + * @extack: The netlink extended ACK for reporting errors. + * @rule: The flow_rule to parse. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function checks if the used keys in the flow rule are supported by + * the device and parses the L2 keys if they match. If unsupported keys are + * used, an error message is set in the extended ACK. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int ksz9477_flower_parse_key(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_rule *rule, + unsigned long cookie, u32 prio) +{ + struct flow_dissector *dissector = rule->match.dissector; + int ret; + + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule, + cookie, prio); + if (ret) + return ret; + } + + return 0; +} + +/** + * ksz9477_flower_parse_action - Parse flow rule actions for a specified port + * on a ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to parse the flow rule actions for. + * @extack: The netlink extended ACK for reporting errors. + * @cls: The flow_cls_offload instance containing the flow rule. + * @entry_idx: The index of the ACL entry to store the action. 
+ * + * This function checks if the actions in the flow rule are supported by + * the device. Currently, only actions that change priorities are supported. + * If unsupported actions are encountered, an error message is set in the + * extended ACK. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int ksz9477_flower_parse_action(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_cls_offload *cls, + int entry_idx) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + const struct flow_action_entry *act; + struct ksz9477_acl_entry *entry; + bool prio_force = false; + u8 prio_val = 0; + int i; + + if (TC_H_MIN(cls->classid)) { + NL_SET_ERR_MSG_MOD(extack, "hw_tc is not supported. Use: action skbedit prio"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_PRIORITY: + if (act->priority > KSZ9477_MAX_TC) { + NL_SET_ERR_MSG_MOD(extack, "Priority value is too high"); + return -EOPNOTSUPP; + } + prio_force = true; + prio_val = act->priority; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "action not supported"); + return -EOPNOTSUPP; + } + } + + /* pick entry to store action */ + entry = &acl->acles.entries[entry_idx]; + + ksz9477_acl_action_rule_cfg(entry->entry, prio_force, prio_val); + ksz9477_acl_processing_rule_set_action(entry->entry, entry_idx); + + return 0; +} + +/** + * ksz9477_cls_flower_add - Add a flow classification rule for a specified port + * on a ksz_device. + * @ds: The DSA switch instance. + * @port: The port number to add the flow classification rule to. + * @cls: The flow_cls_offload instance containing the flow rule. + * @ingress: A flag indicating if the rule is applied on the ingress path. + * + * This function adds a flow classification rule for a specified port on a + * ksz_device. It checks if the ACL offloading is supported and parses the flow + * keys and actions. If the ACL is not supported, it returns an error. If there + * are unprocessed entries, it parses the action for the rule. + * + * Returns: 0 on success or a negative error code on failure. + */ +int ksz9477_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ksz_device *dev = ds->priv; + struct ksz9477_acl_priv *acl; + int action_entry_idx; + int ret; + + acl = dev->ports[port].acl_priv; + + if (!acl) { + NL_SET_ERR_MSG_MOD(extack, "ACL offloading is not supported"); + return -EOPNOTSUPP; + } + + /* A complex rule set can take multiple entries. Use first entry + * to store the action. + */ + action_entry_idx = acl->acles.entries_count; + + ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie, + cls->common.prio); + if (ret) + return ret; + + ret = ksz9477_flower_parse_action(dev, port, extack, cls, + action_entry_idx); + if (ret) + return ret; + + ret = ksz9477_sort_acl_entries(dev, port); + if (ret) + return ret; + + return ksz9477_acl_write_list(dev, port); +} + +/** + * ksz9477_cls_flower_del - Remove a flow classification rule for a specified + * port on a ksz_device. + * @ds: The DSA switch instance. + * @port: The port number to remove the flow classification rule from. + * @cls: The flow_cls_offload instance containing the flow rule. + * @ingress: A flag indicating if the rule is applied on the ingress path. 
+ *
+ * This function removes a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL is initialized, and if not, returns an
+ * error. If the ACL is initialized, it removes entries with the specified
+ * cookie and rewrites the ACL list.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ unsigned long cookie = cls->cookie;
+ struct ksz_device *dev = ds->priv;
+ struct ksz9477_acl_priv *acl;
+
+ acl = dev->ports[port].acl_priv;
+
+ if (!acl)
+ return -EOPNOTSUPP;
+
+ ksz9477_acl_remove_entries(dev, port, &acl->acles, cookie);
+
+ return ksz9477_acl_write_list(dev, port);
+}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 286e20f340..ff4b39601c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -16,6 +16,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -186,6 +187,72 @@ static const struct ksz_mib_names ksz9477_mib_names[] = {
{ 0x83, "tx_discards" },
};
+struct ksz_driver_strength_prop {
+ const char *name;
+ int offset;
+ int value;
+};
+
+enum ksz_driver_strength_type {
+ KSZ_DRIVER_STRENGTH_HI,
+ KSZ_DRIVER_STRENGTH_LO,
+ KSZ_DRIVER_STRENGTH_IO,
+};
+
+/**
+ * struct ksz_drive_strength - drive strength mapping
+ * @reg_val: register value
+ * @microamp: microamp value
+ */
+struct ksz_drive_strength {
+ u32 reg_val;
+ u32 microamp;
+};
+
+/* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
+ *
+ * These values are not documented for KSZ9477 variants, but Microchip has
+ * confirmed that KSZ9477, KSZ9567, KSZ8567, KSZ9897, KSZ9896, KSZ9563, KSZ9893
+ * and KSZ8563 use the same register (drive strength) settings as KSZ8795.
+ *
+ * Documentation in KSZ8795CLX provides more information with some
+ * recommendations:
+ * - for high speed signals
+ * 1. 4 mA or 8 mA is often used for MII, RMII, and SPI interfaces with
+ * 2.5V or 3.3V VDDIO.
+ * 2. 12 mA or 16 mA is often used for MII, RMII, and SPI interfaces with
+ * 1.8V VDDIO.
+ * 3. 20 mA or 24 mA is often used for GMII/RGMII interfaces with 2.5V
+ * or 3.3V VDDIO.
+ * 4. 28 mA is often used for GMII/RGMII interfaces with 1.8V VDDIO.
+ * 5. Within the same interface, heavier loading should use the higher of
+ * the drive current strengths.
+ * - for low speed signals
+ * 1. 3.3V VDDIO, use either 4 mA or 8 mA.
+ * 2. 2.5V VDDIO, use either 8 mA or 12 mA.
+ * 3. 1.8V VDDIO, use either 12 mA or 16 mA.
+ * 4. For heavy loading, a higher drive current strength can be used.
+ */
+static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
+ { SW_DRIVE_STRENGTH_2MA, 2000 },
+ { SW_DRIVE_STRENGTH_4MA, 4000 },
+ { SW_DRIVE_STRENGTH_8MA, 8000 },
+ { SW_DRIVE_STRENGTH_12MA, 12000 },
+ { SW_DRIVE_STRENGTH_16MA, 16000 },
+ { SW_DRIVE_STRENGTH_20MA, 20000 },
+ { SW_DRIVE_STRENGTH_24MA, 24000 },
+ { SW_DRIVE_STRENGTH_28MA, 28000 },
+};
+
+/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+ * variants.
+ * These values are documented in the KSZ8873 and KSZ8863 datasheets.
+ */ +static const struct ksz_drive_strength ksz8830_drive_strengths[] = { + { 0, 8000 }, + { KSZ8873_DRIVE_STRENGTH_16MA, 16000 }, +}; + static const struct ksz_dev_ops ksz8_dev_ops = { .setup = ksz8_setup, .get_port_addr = ksz8_get_port_addr, @@ -252,6 +319,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .mdb_del = ksz9477_mdb_del, .change_mtu = ksz9477_change_mtu, .phylink_mac_link_up = ksz9477_phylink_mac_link_up, + .get_wol = ksz9477_get_wol, + .set_wol = ksz9477_set_wol, + .wol_pre_shutdown = ksz9477_wol_pre_shutdown, .config_cpu_port = ksz9477_config_cpu_port, .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc, .enable_stp_addr = ksz9477_enable_stp_addr, @@ -298,6 +368,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = { }; static const u16 ksz8795_regs[] = { + [REG_SW_MAC_ADDR] = 0x68, [REG_IND_CTRL_0] = 0x6E, [REG_IND_DATA_8] = 0x70, [REG_IND_DATA_CHECK] = 0x72, @@ -373,6 +444,7 @@ static const u8 ksz8795_shifts[] = { }; static const u16 ksz8863_regs[] = { + [REG_SW_MAC_ADDR] = 0x70, [REG_IND_CTRL_0] = 0x79, [REG_IND_DATA_8] = 0x7B, [REG_IND_DATA_CHECK] = 0x7B, @@ -426,6 +498,7 @@ static u8 ksz8863_shifts[] = { }; static const u16 ksz9477_regs[] = { + [REG_SW_MAC_ADDR] = 0x0302, [P_STP_CTRL] = 0x0B04, [S_START_CTRL] = 0x0300, [S_BROADCAST_CTRL] = 0x0332, @@ -1876,14 +1949,14 @@ static int ksz_irq_phy_setup(struct ksz_device *dev) ret = irq; goto out; } - ds->slave_mii_bus->irq[phy] = irq; + ds->user_mii_bus->irq[phy] = irq; } } return 0; out: while (phy--) if (BIT(phy) & ds->phys_mii_mask) - irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + irq_dispose_mapping(ds->user_mii_bus->irq[phy]); return ret; } @@ -1895,7 +1968,7 @@ static void ksz_irq_phy_free(struct ksz_device *dev) for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) if (BIT(phy) & ds->phys_mii_mask) - irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + irq_dispose_mapping(ds->user_mii_bus->irq[phy]); } static int ksz_mdio_register(struct ksz_device *dev) @@ -1918,12 +1991,12 @@ static int ksz_mdio_register(struct ksz_device *dev) bus->priv = dev; bus->read = ksz_sw_mdio_read; bus->write = ksz_sw_mdio_write; - bus->name = "ksz slave smi"; + bus->name = "ksz user smi"; snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; if (dev->irq > 0) { ret = ksz_irq_phy_setup(dev); @@ -2275,7 +2348,7 @@ static void ksz_mib_read_work(struct work_struct *work) if (!p->read) { const struct dsa_port *dp = dsa_to_port(dev->ds, i); - if (!netif_carrier_ok(dp->slave)) + if (!netif_carrier_ok(dp->user)) mib->cnt_ptr = dev->info->reg_mib_cnt; } port_r_cnt(dev, i); @@ -2395,7 +2468,7 @@ static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, mutex_lock(&mib->cnt_mutex); /* Only read dropped counters if no link. 
*/ - if (!netif_carrier_ok(dp->slave)) + if (!netif_carrier_ok(dp->user)) mib->cnt_ptr = dev->info->reg_mib_cnt; port_r_cnt(dev, port); memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); @@ -2498,15 +2571,14 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port, return dev->dev_ops->mdb_del(dev, port, mdb, db); } -static int ksz_enable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) +static int ksz_port_setup(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; if (!dsa_is_user_port(ds, port)) return 0; - /* setup slave port */ + /* setup user port */ dev->dev_ops->port_setup(dev, port, false); /* port_stp_state_set() will be called after to enable the port so @@ -2562,6 +2634,23 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) ksz_update_port_member(dev, port); } +static void ksz_port_teardown(struct dsa_switch *ds, int port) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + if (dsa_is_user_port(ds, port)) + ksz9477_port_acl_free(dev, port); + } +} + static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) @@ -3115,6 +3204,44 @@ static int ksz_switch_detect(struct ksz_device *dev) return 0; } +static int ksz_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return ksz9477_cls_flower_add(ds, port, cls, ingress); + } + + return -EOPNOTSUPP; +} + +static int ksz_cls_flower_del(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return ksz9477_cls_flower_del(ds, port, cls, ingress); + } + + return -EOPNOTSUPP; +} + /* Bandwidth is calculated by idle slope/transmission speed. Then the Bandwidth * is converted to Hex-decimal using the successive multiplication method. On * every step, integer part is taken and decimal part is carry forwarded. 
@@ -3427,6 +3554,224 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port, } } +static void ksz_get_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct ksz_device *dev = ds->priv; + + if (dev->dev_ops->get_wol) + dev->dev_ops->get_wol(dev, port, wol); +} + +static int ksz_set_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct ksz_device *dev = ds->priv; + + if (dev->dev_ops->set_wol) + return dev->dev_ops->set_wol(dev, port, wol); + + return -EOPNOTSUPP; +} + +static int ksz_port_set_mac_address(struct dsa_switch *ds, int port, + const unsigned char *addr) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ethtool_wolinfo wol; + + if (dp->hsr_dev) { + dev_err(ds->dev, + "Cannot change MAC address on port %d with active HSR offload\n", + port); + return -EBUSY; + } + + ksz_get_wol(ds, dp->index, &wol); + if (wol.wolopts & WAKE_MAGIC) { + dev_err(ds->dev, + "Cannot change MAC address on port %d with active Wake on Magic Packet\n", + port); + return -EBUSY; + } + + return 0; +} + +/** + * ksz_is_port_mac_global_usable - Check if the MAC address on a given port + * can be used as a global address. + * @ds: Pointer to the DSA switch structure. + * @port: The port number on which the MAC address is to be checked. + * + * This function examines the MAC address set on the specified port and + * determines if it can be used as a global address for the switch. + * + * Return: true if the port's MAC address can be used as a global address, false + * otherwise. + */ +bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port) +{ + struct net_device *user = dsa_to_port(ds, port)->user; + const unsigned char *addr = user->dev_addr; + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr)) + return false; + + return true; +} + +/** + * ksz_switch_macaddr_get - Program the switch's MAC address register. + * @ds: DSA switch instance. + * @port: Port number. + * @extack: Netlink extended acknowledgment. + * + * This function programs the switch's MAC address register with the MAC address + * of the requesting user port. This single address is used by the switch for + * multiple features like HSR self-address filtering and WoL. Other user ports + * can share ownership of this address as long as their MAC address is the same. + * The MAC addresses of user ports must not change while they have ownership of + * the switch MAC address. + * + * Return: 0 on success, or other error codes on failure. 
+ */ +int ksz_switch_macaddr_get(struct dsa_switch *ds, int port, + struct netlink_ext_ack *extack) +{ + struct net_device *user = dsa_to_port(ds, port)->user; + const unsigned char *addr = user->dev_addr; + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + int i, ret; + + /* Make sure concurrent MAC address changes are blocked */ + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (switch_macaddr) { + if (!ether_addr_equal(switch_macaddr->addr, addr)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Switch already configured for MAC address %pM", + switch_macaddr->addr); + return -EBUSY; + } + + refcount_inc(&switch_macaddr->refcount); + return 0; + } + + switch_macaddr = kzalloc(sizeof(*switch_macaddr), GFP_KERNEL); + if (!switch_macaddr) + return -ENOMEM; + + ether_addr_copy(switch_macaddr->addr, addr); + refcount_set(&switch_macaddr->refcount, 1); + dev->switch_macaddr = switch_macaddr; + + /* Program the switch MAC address to hardware */ + for (i = 0; i < ETH_ALEN; i++) { + ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]); + if (ret) + goto macaddr_drop; + } + + return 0; + +macaddr_drop: + dev->switch_macaddr = NULL; + refcount_set(&switch_macaddr->refcount, 0); + kfree(switch_macaddr); + + return ret; +} + +void ksz_switch_macaddr_put(struct dsa_switch *ds) +{ + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + int i; + + /* Make sure concurrent MAC address changes are blocked */ + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (!refcount_dec_and_test(&switch_macaddr->refcount)) + return; + + for (i = 0; i < ETH_ALEN; i++) + ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, 0); + + dev->switch_macaddr = NULL; + kfree(switch_macaddr); +} + +static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct ksz_device *dev = ds->priv; + enum hsr_version ver; + int ret; + + ret = hsr_get_version(hsr, &ver); + if (ret) + return ret; + + if (dev->chip_id != KSZ9477_CHIP_ID) { + NL_SET_ERR_MSG_MOD(extack, "Chip does not support HSR offload"); + return -EOPNOTSUPP; + } + + /* KSZ9477 can support HW offloading of only 1 HSR device */ + if (dev->hsr_dev && hsr != dev->hsr_dev) { + NL_SET_ERR_MSG_MOD(extack, "Offload supported for a single HSR"); + return -EOPNOTSUPP; + } + + /* KSZ9477 only supports HSR v0 and v1 */ + if (!(ver == HSR_V0 || ver == HSR_V1)) { + NL_SET_ERR_MSG_MOD(extack, "Only HSR v0 and v1 supported"); + return -EOPNOTSUPP; + } + + /* Self MAC address filtering, to avoid frames traversing + * the HSR ring more than once. 
+ */ + ret = ksz_switch_macaddr_get(ds, port, extack); + if (ret) + return ret; + + ksz9477_hsr_join(ds, port, hsr); + dev->hsr_dev = hsr; + dev->hsr_ports |= BIT(port); + + return 0; +} + +static int ksz_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + + WARN_ON(dev->chip_id != KSZ9477_CHIP_ID); + + ksz9477_hsr_leave(ds, port, hsr); + dev->hsr_ports &= ~BIT(port); + if (!dev->hsr_ports) + dev->hsr_dev = NULL; + + ksz_switch_macaddr_put(ds); + + return 0; +} + static const struct dsa_switch_ops ksz_switch_ops = { .get_tag_protocol = ksz_get_tag_protocol, .connect_tag_protocol = ksz_connect_tag_protocol, @@ -3439,14 +3784,18 @@ static const struct dsa_switch_ops ksz_switch_ops = { .phylink_mac_config = ksz_phylink_mac_config, .phylink_mac_link_up = ksz_phylink_mac_link_up, .phylink_mac_link_down = ksz_mac_link_down, - .port_enable = ksz_enable_port, + .port_setup = ksz_port_setup, .set_ageing_time = ksz_set_ageing_time, .get_strings = ksz_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, .port_bridge_join = ksz_port_bridge_join, .port_bridge_leave = ksz_port_bridge_leave, + .port_hsr_join = ksz_hsr_join, + .port_hsr_leave = ksz_hsr_leave, + .port_set_mac_address = ksz_port_set_mac_address, .port_stp_state_set = ksz_port_stp_state_set, + .port_teardown = ksz_port_teardown, .port_pre_bridge_flags = ksz_port_pre_bridge_flags, .port_bridge_flags = ksz_port_bridge_flags, .port_fast_age = ksz_port_fast_age, @@ -3464,11 +3813,15 @@ static const struct dsa_switch_ops ksz_switch_ops = { .get_pause_stats = ksz_get_pause_stats, .port_change_mtu = ksz_change_mtu, .port_max_mtu = ksz_max_mtu, + .get_wol = ksz_get_wol, + .set_wol = ksz_set_wol, .get_ts_info = ksz_get_ts_info, .port_hwtstamp_get = ksz_hwtstamp_get, .port_hwtstamp_set = ksz_hwtstamp_set, .port_txtstamp = ksz_port_txtstamp, .port_rxtstamp = ksz_port_rxtstamp, + .cls_flower_add = ksz_cls_flower_add, + .cls_flower_del = ksz_cls_flower_del, .port_setup_tc = ksz_setup_tc, .get_mac_eee = ksz_get_mac_eee, .set_mac_eee = ksz_set_mac_eee, @@ -3501,6 +3854,30 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) } EXPORT_SYMBOL(ksz_switch_alloc); +/** + * ksz_switch_shutdown - Shutdown routine for the switch device. + * @dev: The switch device structure. + * + * This function is responsible for initiating a shutdown sequence for the + * switch device. It invokes the reset operation defined in the device + * operations, if available, to reset the switch. Subsequently, it calls the + * DSA framework's shutdown function to ensure a proper shutdown of the DSA + * switch. + */ +void ksz_switch_shutdown(struct ksz_device *dev) +{ + bool wol_enabled = false; + + if (dev->dev_ops->wol_pre_shutdown) + dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled); + + if (dev->dev_ops->reset && !wol_enabled) + dev->dev_ops->reset(dev); + + dsa_switch_shutdown(dev->ds); +} +EXPORT_SYMBOL(ksz_switch_shutdown); + static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num, struct device_node *port_dn) { @@ -3538,6 +3915,245 @@ static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num, dev->ports[port_num].rgmii_tx_val = tx_delay; } +/** + * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding + * register value. + * @array: The array of drive strength values to search. + * @array_size: The size of the array. + * @microamp: The drive strength value in microamp to be converted. 
+ * + * This function searches the array of drive strength values for the given + * microamp value and returns the corresponding register value for that drive. + * + * Returns: If found, the corresponding register value for that drive strength + * is returned. Otherwise, -EINVAL is returned indicating an invalid value. + */ +static int ksz_drive_strength_to_reg(const struct ksz_drive_strength *array, + size_t array_size, int microamp) +{ + int i; + + for (i = 0; i < array_size; i++) { + if (array[i].microamp == microamp) + return array[i].reg_val; + } + + return -EINVAL; +} + +/** + * ksz_drive_strength_error() - Report invalid drive strength value + * @dev: ksz device + * @array: The array of drive strength values to search. + * @array_size: The size of the array. + * @microamp: Invalid drive strength value in microamp + * + * This function logs an error message when an unsupported drive strength value + * is detected. It lists out all the supported drive strength values for + * reference in the error message. + */ +static void ksz_drive_strength_error(struct ksz_device *dev, + const struct ksz_drive_strength *array, + size_t array_size, int microamp) +{ + char supported_values[100]; + size_t remaining_size; + int added_len; + char *ptr; + int i; + + remaining_size = sizeof(supported_values); + ptr = supported_values; + + for (i = 0; i < array_size; i++) { + added_len = snprintf(ptr, remaining_size, + i == 0 ? "%d" : ", %d", array[i].microamp); + + if (added_len >= remaining_size) + break; + + ptr += added_len; + remaining_size -= added_len; + } + + dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n", + microamp, supported_values); +} + +/** + * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477 + * chip variants. + * @dev: ksz device + * @props: Array of drive strength properties to be applied + * @num_props: Number of properties in the array + * + * This function configures the drive strength for various KSZ9477 chip variants + * based on the provided properties. It handles chip-specific nuances and + * ensures only valid drive strengths are written to the respective chip. + * + * Return: 0 on successful configuration, a negative error code on failure. + */ +static int ksz9477_drive_strength_write(struct ksz_device *dev, + struct ksz_driver_strength_prop *props, + int num_props) +{ + size_t array_size = ARRAY_SIZE(ksz9477_drive_strengths); + int i, ret, reg; + u8 mask = 0; + u8 val = 0; + + if (props[KSZ_DRIVER_STRENGTH_IO].value != -1) + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + props[KSZ_DRIVER_STRENGTH_IO].name); + + if (dev->chip_id == KSZ8795_CHIP_ID || + dev->chip_id == KSZ8794_CHIP_ID || + dev->chip_id == KSZ8765_CHIP_ID) + reg = KSZ8795_REG_SW_CTRL_20; + else + reg = KSZ9477_REG_SW_IO_STRENGTH; + + for (i = 0; i < num_props; i++) { + if (props[i].value == -1) + continue; + + ret = ksz_drive_strength_to_reg(ksz9477_drive_strengths, + array_size, props[i].value); + if (ret < 0) { + ksz_drive_strength_error(dev, ksz9477_drive_strengths, + array_size, props[i].value); + return ret; + } + + mask |= SW_DRIVE_STRENGTH_M << props[i].offset; + val |= ret << props[i].offset; + } + + return ksz_rmw8(dev, reg, mask, val); +} + +/** + * ksz8830_drive_strength_write() - Set the drive strength configuration for + * KSZ8830 compatible chip variants. 
+ * @dev: ksz device + * @props: Array of drive strength properties to be set + * @num_props: Number of properties in the array + * + * This function applies the specified drive strength settings to KSZ8830 chip + * variants (KSZ8873, KSZ8863). + * It ensures the configurations align with what the chip variant supports and + * warns or errors out on unsupported settings. + * + * Return: 0 on success, error code otherwise + */ +static int ksz8830_drive_strength_write(struct ksz_device *dev, + struct ksz_driver_strength_prop *props, + int num_props) +{ + size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths); + int microamp; + int i, ret; + + for (i = 0; i < num_props; i++) { + if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO) + continue; + + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + props[i].name); + } + + microamp = props[KSZ_DRIVER_STRENGTH_IO].value; + ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size, + microamp); + if (ret < 0) { + ksz_drive_strength_error(dev, ksz8830_drive_strengths, + array_size, microamp); + return ret; + } + + return ksz_rmw8(dev, KSZ8873_REG_GLOBAL_CTRL_12, + KSZ8873_DRIVE_STRENGTH_16MA, ret); +} + +/** + * ksz_parse_drive_strength() - Extract and apply drive strength configurations + * from device tree properties. + * @dev: ksz device + * + * This function reads the specified drive strength properties from the + * device tree, validates against the supported chip variants, and sets + * them accordingly. An error should be critical here, as the drive strength + * settings are crucial for EMI compliance. + * + * Return: 0 on success, error code otherwise + */ +static int ksz_parse_drive_strength(struct ksz_device *dev) +{ + struct ksz_driver_strength_prop of_props[] = { + [KSZ_DRIVER_STRENGTH_HI] = { + .name = "microchip,hi-drive-strength-microamp", + .offset = SW_HI_SPEED_DRIVE_STRENGTH_S, + .value = -1, + }, + [KSZ_DRIVER_STRENGTH_LO] = { + .name = "microchip,lo-drive-strength-microamp", + .offset = SW_LO_SPEED_DRIVE_STRENGTH_S, + .value = -1, + }, + [KSZ_DRIVER_STRENGTH_IO] = { + .name = "microchip,io-drive-strength-microamp", + .offset = 0, /* don't care */ + .value = -1, + }, + }; + struct device_node *np = dev->dev->of_node; + bool have_any_prop = false; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(of_props); i++) { + ret = of_property_read_u32(np, of_props[i].name, + &of_props[i].value); + if (ret && ret != -EINVAL) + dev_warn(dev->dev, "Failed to read %s\n", + of_props[i].name); + if (ret) + continue; + + have_any_prop = true; + } + + if (!have_any_prop) + return 0; + + switch (dev->chip_id) { + case KSZ8830_CHIP_ID: + return ksz8830_drive_strength_write(dev, of_props, + ARRAY_SIZE(of_props)); + case KSZ8795_CHIP_ID: + case KSZ8794_CHIP_ID: + case KSZ8765_CHIP_ID: + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return ksz9477_drive_strength_write(dev, of_props, + ARRAY_SIZE(of_props)); + default: + for (i = 0; i < ARRAY_SIZE(of_props); i++) { + if (of_props[i].value == -1) + continue; + + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + of_props[i].name); + } + } + + return 0; +} + int ksz_switch_register(struct ksz_device *dev) { const struct ksz_chip_data *info; @@ -3620,6 +4236,10 @@ int ksz_switch_register(struct ksz_device *dev) for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; if 
(dev->dev->of_node) { + ret = ksz_parse_drive_strength(dev); + if (ret) + return ret; + ret = of_get_phy_mode(dev->dev->of_node, &interface); if (ret == 0) dev->compat_interface = interface; @@ -3651,6 +4271,9 @@ int ksz_switch_register(struct ksz_device *dev) dev_err(dev->dev, "inconsistent synclko settings\n"); return -EINVAL; } + + dev->wakeup_source = of_property_read_bool(dev->dev->of_node, + "wakeup-source"); } ret = dsa_register_switch(dev->ds); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a4de58847d..b7e8a403a1 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -101,6 +101,11 @@ struct ksz_ptp_irq { int num; }; +struct ksz_switch_macaddr { + unsigned char addr[ETH_ALEN]; + refcount_t refcount; +}; + struct ksz_port { bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ bool learning; @@ -117,6 +122,7 @@ struct ksz_port { u32 rgmii_tx_val; u32 rgmii_rx_val; struct ksz_device *ksz_dev; + void *acl_priv; struct ksz_irq pirq; u8 num; #if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP) @@ -157,6 +163,7 @@ struct ksz_device { phy_interface_t compat_interface; bool synclko_125; bool synclko_disable; + bool wakeup_source; struct vlan_table *vlan_cache; @@ -169,6 +176,10 @@ struct ksz_device { struct mutex lock_irq; /* IRQ Access */ struct ksz_irq girq; struct ksz_ptp_data ptp_data; + + struct ksz_switch_macaddr *switch_macaddr; + struct net_device *hsr_dev; /* HSR */ + u8 hsr_ports; }; /* List of supported models */ @@ -211,6 +222,7 @@ enum ksz_chip_id { }; enum ksz_regs { + REG_SW_MAC_ADDR, REG_IND_CTRL_0, REG_IND_DATA_8, REG_IND_DATA_CHECK, @@ -362,6 +374,11 @@ struct ksz_dev_ops { int duplex, bool tx_pause, bool rx_pause); void (*setup_rgmii_delay)(struct ksz_device *dev, int port); int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val); + void (*get_wol)(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); + int (*set_wol)(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); + void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled); void (*config_cpu_port)(struct dsa_switch *ds); int (*enable_stp_addr)(struct ksz_device *dev); int (*reset)(struct ksz_device *dev); @@ -374,12 +391,17 @@ int ksz_switch_register(struct ksz_device *dev); void ksz_switch_remove(struct ksz_device *dev); void ksz_init_mib_timer(struct ksz_device *dev); +bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port); void ksz_r_mib_stats64(struct ksz_device *dev, int port); void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port); void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); bool ksz_get_gbit(struct ksz_device *dev, int port); phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit); extern const struct ksz_chip_data ksz_switch_chips[]; +int ksz_switch_macaddr_get(struct dsa_switch *ds, int port, + struct netlink_ext_ack *extack); +void ksz_switch_macaddr_put(struct dsa_switch *ds); +void ksz_switch_shutdown(struct ksz_device *dev); /* Common register access functions */ static inline struct regmap *ksz_regmap_8(struct ksz_device *dev) @@ -689,6 +711,26 @@ static inline int is_lan937x(struct ksz_device *dev) #define KSZ8_LEGAL_PACKET_SIZE 1518 #define KSZ9477_MAX_FRAME_SIZE 9000 +#define KSZ8873_REG_GLOBAL_CTRL_12 0x0e +/* Drive Strength of I/O Pad + * 0: 8mA, 1: 16mA + */ +#define KSZ8873_DRIVE_STRENGTH_16MA BIT(6) + +#define KSZ8795_REG_SW_CTRL_20 0xa3 +#define KSZ9477_REG_SW_IO_STRENGTH 0x010d 
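/* A minimal illustrative sketch, not part of the patch itself: the I/O pad
 * drive-strength register added above (KSZ9477_REG_SW_IO_STRENGTH) packs two
 * 3-bit fields, with the mask and shift macros continuing just below
 * (hi-speed pads in bits 6:4, lo-speed pads in bits 2:0). Assuming ksz_rmw8()
 * as used elsewhere in this patch, a hypothetical helper could compose and
 * write e.g. 16 mA for the hi-speed pads and 8 mA for the lo-speed pads as
 * shown here; this mirrors what ksz9477_drive_strength_write() does earlier
 * in the patch after translating the devicetree microamp values through
 * ksz_drive_strength_to_reg(). The function name is illustrative only.
 */
static int ksz9477_pad_strength_example(struct ksz_device *dev)
{
	u8 mask, val;

	mask = (SW_DRIVE_STRENGTH_M << SW_HI_SPEED_DRIVE_STRENGTH_S) |
	       (SW_DRIVE_STRENGTH_M << SW_LO_SPEED_DRIVE_STRENGTH_S);
	val = (SW_DRIVE_STRENGTH_16MA << SW_HI_SPEED_DRIVE_STRENGTH_S) |
	      (SW_DRIVE_STRENGTH_8MA << SW_LO_SPEED_DRIVE_STRENGTH_S);

	return ksz_rmw8(dev, KSZ9477_REG_SW_IO_STRENGTH, mask, val);
}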
+#define SW_DRIVE_STRENGTH_M 0x7 +#define SW_DRIVE_STRENGTH_2MA 0 +#define SW_DRIVE_STRENGTH_4MA 1 +#define SW_DRIVE_STRENGTH_8MA 2 +#define SW_DRIVE_STRENGTH_12MA 3 +#define SW_DRIVE_STRENGTH_16MA 4 +#define SW_DRIVE_STRENGTH_20MA 5 +#define SW_DRIVE_STRENGTH_24MA 6 +#define SW_DRIVE_STRENGTH_28MA 7 +#define SW_HI_SPEED_DRIVE_STRENGTH_S 4 +#define SW_LO_SPEED_DRIVE_STRENGTH_S 0 + #define KSZ9477_REG_PORT_OUT_RATE_0 0x0420 #define KSZ9477_OUT_RATE_NO_LIMIT 0 diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c index 4e22a695a6..1fe105913c 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.c +++ b/drivers/net/dsa/microchip/ksz_ptp.c @@ -557,7 +557,7 @@ static void ksz_ptp_txtstamp_skb(struct ksz_device *dev, struct skb_shared_hwtstamps hwtstamps = {}; int ret; - /* timeout must include DSA master to transmit data, tstamp latency, + /* timeout must include DSA conduit to transmit data, tstamp latency, * IRQ latency and time for reading the time stamp. */ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp, diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index 2793384516..6f6d878e74 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -114,10 +114,7 @@ static void ksz_spi_shutdown(struct spi_device *spi) if (!dev) return; - if (dev->dev_ops->reset) - dev->dev_ops->reset(dev); - - dsa_switch_shutdown(dev->ds); + ksz_switch_shutdown(dev); spi_set_drvdata(spi, NULL); } diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c index 0a6a2fe34e..b74a230a3f 100644 --- a/drivers/net/dsa/mt7530-mmio.c +++ b/drivers/net/dsa/mt7530-mmio.c @@ -63,15 +63,12 @@ mt7988_probe(struct platform_device *pdev) return dsa_register_switch(priv->ds); } -static int -mt7988_remove(struct platform_device *pdev) +static void mt7988_remove(struct platform_device *pdev) { struct mt7530_priv *priv = platform_get_drvdata(pdev); if (priv) mt7530_remove_common(priv); - - return 0; } static void mt7988_shutdown(struct platform_device *pdev) @@ -88,7 +85,7 @@ static void mt7988_shutdown(struct platform_device *pdev) static struct platform_driver mt7988_platform_driver = { .probe = mt7988_probe, - .remove = mt7988_remove, + .remove_new = mt7988_remove, .shutdown = mt7988_shutdown, .driver = { .name = "mt7530-mmio", diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 035a34b50f..2333f6383b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -836,8 +836,7 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) - strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", mt7530_mib[i].name); } static void @@ -1114,7 +1113,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) u32 val; /* When a new MTU is set, DSA always set the CPU port's MTU to the - * largest MTU of the slave ports. Because the switch only has a global + * largest MTU of the user ports. Because the switch only has a global * RX length register, only allowing CPU port here is enough. 
*/ if (!dsa_is_cpu_port(ds, port)) @@ -2070,7 +2069,7 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv) unsigned int irq; irq = irq_create_mapping(priv->irq_domain, p); - ds->slave_mii_bus->irq[p] = irq; + ds->user_mii_bus->irq[p] = irq; } } } @@ -2164,7 +2163,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv) if (!bus) return -ENOMEM; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; bus->priv = priv; bus->name = KBUILD_MODNAME "-mii"; snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); @@ -2201,20 +2200,20 @@ mt7530_setup(struct dsa_switch *ds) u32 id, val; int ret, i; - /* The parent node of master netdev which holds the common system + /* The parent node of conduit netdev which holds the common system * controller also is the container for two GMACs nodes representing * as two netdev instances. */ dsa_switch_for_each_cpu_port(cpu_dp, ds) { - dn = cpu_dp->master->dev.of_node->parent; + dn = cpu_dp->conduit->dev.of_node->parent; /* It doesn't matter which CPU port is found first, - * their masters should share the same parent OF node + * their conduits should share the same parent OF node */ break; } if (!dn) { - dev_err(ds->dev, "parent OF node of DSA master not found"); + dev_err(ds->dev, "parent OF node of DSA conduit not found"); return -EINVAL; } @@ -2489,7 +2488,7 @@ mt7531_setup(struct dsa_switch *ds) if (mt7531_dual_sgmii_supported(priv)) { priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII; - /* Let ds->slave_mii_bus be able to access external phy. */ + /* Let ds->user_mii_bus be able to access external phy. */ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, MT7531_EXT_P_MDC_11); mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, @@ -2718,7 +2717,7 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: dp = dsa_to_port(ds, port); - phydev = dp->slave->phydev; + phydev = dp->user->phydev; return mt7531_rgmii_setup(priv, port, interface, phydev); case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_NA: @@ -2824,15 +2823,6 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); } -static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs, - unsigned int mode, - phy_interface_t interface, - int speed, int duplex) -{ - if (pcs->ops->pcs_link_up) - pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); -} - static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, @@ -2848,8 +2838,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, /* MT753x MAC works in 1G full duplex mode for all up-clocked * variants. 
*/ - if (interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_TRGMII || + if (interface == PHY_INTERFACE_MODE_TRGMII || (phy_interface_mode_is_8023z(interface))) { speed = SPEED_1000; duplex = DUPLEX_FULL; @@ -2921,8 +2910,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) return ret; mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPU_PORT_SETTING(priv->id)); - mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED, - interface, speed, DUPLEX_FULL); mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, speed, DUPLEX_FULL, true, true); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index dc7f9b99f4..dd2cbffcfe 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2498,7 +2498,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, else member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED; - /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port + /* net/dsa/user.c will call dsa_port_vlan_add() for the affected port * and then the CPU port. Do not warn for duplicates for the CPU port. */ warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port); @@ -3545,7 +3545,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad, int err; if (!chip->info->ops->phy_read_c45) - return -EOPNOTSUPP; + return 0xffff; mv88e6xxx_reg_lock(chip); err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val); @@ -3731,7 +3731,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) return err; chip->ds = ds; - ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); + ds->user_mii_bus = mv88e6xxx_default_mdio_bus(chip); /* Since virtual bridges are mapped in the PVT, the number we support * depends on the physical switch topology. 
We need to let DSA figure diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 44383a03ef..c54d305a1d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -601,8 +601,8 @@ struct mv88e6xxx_ops { int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port, uint8_t *data); - int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port, - uint64_t *data); + size_t (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); /* SERDES registers for ethtool */ int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c index c31f0e54f1..d758a6c1b2 100644 --- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c +++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c @@ -208,7 +208,7 @@ static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs, static int mv88e6390_erratum_3_14(struct mv88e639x_pcs *mpcs) { - const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1, + static const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1, MV88E6390_PORT9_LANE2, MV88E6390_PORT9_LANE3, MV88E6390_PORT10_LANE0, MV88E6390_PORT10_LANE1, MV88E6390_PORT10_LANE2, MV88E6390_PORT10_LANE3 }; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index ea17231dc3..56391e09b3 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -182,6 +182,10 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly) mv88e6xxx_reg_lock(chip); err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]); mv88e6xxx_reg_unlock(chip); + if (err) { + dev_err(chip->dev, "failed to write TAI status register\n"); + return; + } /* This is an external timestamp */ ev.type = PTP_CLOCK_EXTTS; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 3b4b42651f..01ea539407 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -177,8 +177,8 @@ static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip, return val; } -int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data) +size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) { struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port]; struct mv88e6352_serdes_hw_stat *stat; @@ -187,7 +187,7 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, err = mv88e6352_g2_scratch_port_has_serdes(chip, port); if (err <= 0) - return err; + return 0; BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) > ARRAY_SIZE(mv88e6xxx_port->serdes_stats)); @@ -429,8 +429,8 @@ static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane, return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32); } -int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data) +size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) { struct mv88e6390_serdes_hw_stat *stat; int lane; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index aac95cab46..ff5c3ab31e 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -127,13 +127,13 @@ unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int 
mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); -int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data); +size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); -int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, - uint64_t *data); +size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data); int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port); void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p); diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 9a3e5ec169..61e9548773 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -42,22 +42,22 @@ static struct net_device *felix_classify_db(struct dsa_db db) } } -static int felix_cpu_port_for_master(struct dsa_switch *ds, - struct net_device *master) +static int felix_cpu_port_for_conduit(struct dsa_switch *ds, + struct net_device *conduit) { struct ocelot *ocelot = ds->priv; struct dsa_port *cpu_dp; int lag; - if (netif_is_lag_master(master)) { + if (netif_is_lag_master(conduit)) { mutex_lock(&ocelot->fwd_domain_lock); - lag = ocelot_bond_get_id(ocelot, master); + lag = ocelot_bond_get_id(ocelot, conduit); mutex_unlock(&ocelot->fwd_domain_lock); return lag; } - cpu_dp = master->dsa_ptr; + cpu_dp = conduit->dsa_ptr; return cpu_dp->index; } @@ -366,7 +366,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds, * is the mode through which frames can be injected from and extracted to an * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU * running Linux, and this forms a DSA setup together with the enetc or fman - * DSA master. + * DSA conduit. */ static void felix_npi_port_init(struct ocelot *ocelot, int port) { @@ -441,16 +441,16 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) return BIT(ocelot->num_phys_ports); } -static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct ocelot *ocelot = ds->priv; - if (netif_is_lag_master(master)) { + if (netif_is_lag_master(conduit)) { NL_SET_ERR_MSG_MOD(extack, - "LAG DSA master only supported using ocelot-8021q"); + "LAG DSA conduit only supported using ocelot-8021q"); return -EOPNOTSUPP; } @@ -459,24 +459,24 @@ static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, * come back up until they're all changed to the new one. 
*/ dsa_switch_for_each_user_port(other_dp, ds) { - struct net_device *slave = other_dp->slave; + struct net_device *user = other_dp->user; - if (other_dp != dp && (slave->flags & IFF_UP) && - dsa_port_to_master(other_dp) != master) { + if (other_dp != dp && (user->flags & IFF_UP) && + dsa_port_to_conduit(other_dp) != conduit) { NL_SET_ERR_MSG_MOD(extack, - "Cannot change while old master still has users"); + "Cannot change while old conduit still has users"); return -EOPNOTSUPP; } } felix_npi_port_deinit(ocelot, ocelot->npi); - felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master)); + felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit)); return 0; } /* Alternatively to using the NPI functionality, that same hardware MAC - * connected internally to the enetc or fman DSA master can be configured to + * connected internally to the enetc or fman DSA conduit can be configured to * use the software-defined tag_8021q frame format. As far as the hardware is * concerned, it thinks it is a "dumb switch" - the queues of the CPU port * module are now disconnected from it, but can still be accessed through @@ -486,7 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { .setup = felix_tag_npi_setup, .teardown = felix_tag_npi_teardown, .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, - .change_master = felix_tag_npi_change_master, + .change_conduit = felix_tag_npi_change_conduit, }; static int felix_tag_8021q_setup(struct dsa_switch *ds) @@ -561,11 +561,11 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) return dsa_cpu_ports(ds); } -static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { - int cpu = felix_cpu_port_for_master(ds, master); + int cpu = felix_cpu_port_for_conduit(ds, conduit); struct ocelot *ocelot = ds->priv; ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); @@ -578,7 +578,7 @@ static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { .setup = felix_tag_8021q_setup, .teardown = felix_tag_8021q_teardown, .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, - .change_master = felix_tag_8021q_change_master, + .change_conduit = felix_tag_8021q_change_conduit, }; static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, @@ -741,14 +741,14 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port, !!felix->host_flood_mc_mask, true); } -static int felix_port_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_port_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - return felix->tag_proto_ops->change_master(ds, port, master, extack); + return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack); } static int felix_set_ageing_time(struct dsa_switch *ds, @@ -953,7 +953,7 @@ static int felix_lag_join(struct dsa_switch *ds, int port, if (!dsa_is_cpu_port(ds, port)) return 0; - return felix_port_change_master(ds, port, lag.dev, extack); + return felix_port_change_conduit(ds, port, lag.dev, extack); } static int felix_lag_leave(struct dsa_switch *ds, int port, @@ -967,7 +967,7 @@ static int felix_lag_leave(struct 
dsa_switch *ds, int port, if (!dsa_is_cpu_port(ds, port)) return 0; - return felix_port_change_master(ds, port, lag.dev, NULL); + return felix_port_change_conduit(ds, port, lag.dev, NULL); } static int felix_lag_change(struct dsa_switch *ds, int port) @@ -1116,10 +1116,10 @@ static int felix_port_enable(struct dsa_switch *ds, int port, return 0; if (ocelot->npi >= 0) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); - if (felix_cpu_port_for_master(ds, master) != ocelot->npi) { - dev_err(ds->dev, "Multiple masters are not allowed\n"); + if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) { + dev_err(ds->dev, "Multiple conduits are not allowed\n"); return -EINVAL; } } @@ -2164,7 +2164,7 @@ const struct dsa_switch_ops felix_switch_ops = { .port_add_dscp_prio = felix_port_add_dscp_prio, .port_del_dscp_prio = felix_port_del_dscp_prio, .port_set_host_flood = felix_port_set_host_flood, - .port_change_master = felix_port_change_master, + .port_change_conduit = felix_port_change_conduit, }; EXPORT_SYMBOL_GPL(felix_switch_ops); @@ -2176,7 +2176,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) if (!dsa_is_user_port(ds, port)) return NULL; - return dsa_to_port(ds, port)->slave; + return dsa_to_port(ds, port)->user; } EXPORT_SYMBOL_GPL(felix_port_to_netdev); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 1d4befe7cf..dbf5872fe3 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -77,9 +77,9 @@ struct felix_tag_proto_ops { int (*setup)(struct dsa_switch *ds); void (*teardown)(struct dsa_switch *ds); unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds); - int (*change_master)(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack); + int (*change_conduit)(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack); }; extern const struct dsa_switch_ops felix_switch_ops; diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c index c29bee5a5c..22187d831c 100644 --- a/drivers/net/dsa/ocelot/ocelot_ext.c +++ b/drivers/net/dsa/ocelot/ocelot_ext.c @@ -115,19 +115,17 @@ err_free_felix: return err; } -static int ocelot_ext_remove(struct platform_device *pdev) +static void ocelot_ext_remove(struct platform_device *pdev) { struct felix *felix = dev_get_drvdata(&pdev->dev); if (!felix) - return 0; + return; dsa_unregister_switch(felix->ds); kfree(felix->ds); kfree(felix); - - return 0; } static void ocelot_ext_shutdown(struct platform_device *pdev) @@ -154,7 +152,7 @@ static struct platform_driver ocelot_ext_switch_driver = { .of_match_table = ocelot_ext_switch_of_match, }, .probe = ocelot_ext_probe, - .remove = ocelot_ext_remove, + .remove_new = ocelot_ext_remove, .shutdown = ocelot_ext_shutdown, }; module_platform_driver(ocelot_ext_switch_driver); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 8f912bda12..049930da05 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -1029,19 +1029,17 @@ err_alloc_felix: return err; } -static int seville_remove(struct platform_device *pdev) +static void seville_remove(struct platform_device *pdev) { struct felix *felix = platform_get_drvdata(pdev); if (!felix) - return 0; + return; dsa_unregister_switch(felix->ds); kfree(felix->ds); kfree(felix); - - return 0; } static void seville_shutdown(struct 
platform_device *pdev) @@ -1064,7 +1062,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match); static struct platform_driver seville_vsc9953_driver = { .probe = seville_probe, - .remove = seville_remove, + .remove_new = seville_remove, .shutdown = seville_shutdown, .driver = { .name = "mscc_seville", diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 4ce68e655a..6f2a7aee7e 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -323,14 +323,14 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) mutex_lock(&mgmt_eth_data->mutex); - /* Check mgmt_master if is operational */ - if (!priv->mgmt_master) { + /* Check if the mgmt_conduit if is operational */ + if (!priv->mgmt_conduit) { kfree_skb(skb); mutex_unlock(&mgmt_eth_data->mutex); return -EINVAL; } - skb->dev = priv->mgmt_master; + skb->dev = priv->mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -375,14 +375,14 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) mutex_lock(&mgmt_eth_data->mutex); - /* Check mgmt_master if is operational */ - if (!priv->mgmt_master) { + /* Check if the mgmt_conduit if is operational */ + if (!priv->mgmt_conduit) { kfree_skb(skb); mutex_unlock(&mgmt_eth_data->mutex); return -EINVAL; } - skb->dev = priv->mgmt_master; + skb->dev = priv->mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -508,7 +508,7 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len, struct qca8k_priv *priv = ctx; u32 reg = *(u16 *)reg_buf; - if (priv->mgmt_master && + if (priv->mgmt_conduit && !qca8k_read_eth(priv, reg, val_buf, val_len)) return 0; @@ -531,7 +531,7 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len, u32 reg = *(u16 *)reg_buf; u32 *val = (u32 *)val_buf; - if (priv->mgmt_master && + if (priv->mgmt_conduit && !qca8k_write_eth(priv, reg, val, val_len)) return 0; @@ -626,7 +626,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, struct sk_buff *write_skb, *clear_skb, *read_skb; struct qca8k_mgmt_eth_data *mgmt_eth_data; u32 write_val, clear_val = 0, val; - struct net_device *mgmt_master; + struct net_device *mgmt_conduit; int ret, ret1; bool ack; @@ -683,18 +683,18 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, */ mutex_lock(&mgmt_eth_data->mutex); - /* Check if mgmt_master is operational */ - mgmt_master = priv->mgmt_master; - if (!mgmt_master) { + /* Check if mgmt_conduit is operational */ + mgmt_conduit = priv->mgmt_conduit; + if (!mgmt_conduit) { mutex_unlock(&mgmt_eth_data->mutex); mutex_unlock(&priv->bus->mdio_lock); ret = -EINVAL; - goto err_mgmt_master; + goto err_mgmt_conduit; } - read_skb->dev = mgmt_master; - clear_skb->dev = mgmt_master; - write_skb->dev = mgmt_master; + read_skb->dev = mgmt_conduit; + clear_skb->dev = mgmt_conduit; + write_skb->dev = mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -780,7 +780,7 @@ exit: return ret; /* Error handling before lock */ -err_mgmt_master: +err_mgmt_conduit: kfree_skb(read_skb); err_read_skb: kfree_skb(clear_skb); @@ -949,34 +949,45 @@ qca8k_mdio_register(struct qca8k_priv *priv) struct dsa_switch *ds = priv->ds; struct device_node *mdio; struct mii_bus *bus; + int err; + + mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); bus = devm_mdiobus_alloc(ds->dev); - if (!bus) - return -ENOMEM; + if (!bus) { + err = -ENOMEM; + goto out_put_node; + } bus->priv = (void *)priv; snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", ds->dst->index, 
ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; /* Check if the devicetree declare the port:phy mapping */ - mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); if (of_device_is_available(mdio)) { - bus->name = "qca8k slave mii"; + bus->name = "qca8k user mii"; bus->read = qca8k_internal_mdio_read; bus->write = qca8k_internal_mdio_write; - return devm_of_mdiobus_register(priv->dev, bus, mdio); + err = devm_of_mdiobus_register(priv->dev, bus, mdio); + goto out_put_node; } /* If a mapping can't be found the legacy mapping is used, * using the qca8k_port_to_phy function */ - bus->name = "qca8k-legacy slave mii"; + bus->name = "qca8k-legacy user mii"; bus->read = qca8k_legacy_mdio_read; bus->write = qca8k_legacy_mdio_write; - return devm_mdiobus_register(priv->dev, bus); + + err = devm_mdiobus_register(priv->dev, bus); + +out_put_node: + of_node_put(mdio); + + return err; } static int @@ -1728,10 +1739,10 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, } static void -qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, - bool operational) +qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit, + bool operational) { - struct dsa_port *dp = master->dsa_ptr; + struct dsa_port *dp = conduit->dsa_ptr; struct qca8k_priv *priv = ds->priv; /* Ethernet MIB/MDIO is only supported for CPU port 0 */ @@ -1741,7 +1752,7 @@ qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, mutex_lock(&priv->mgmt_eth_data.mutex); mutex_lock(&priv->mib_eth_data.mutex); - priv->mgmt_master = operational ? (struct net_device *)master : NULL; + priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL; mutex_unlock(&priv->mib_eth_data.mutex); mutex_unlock(&priv->mgmt_eth_data.mutex); @@ -2016,7 +2027,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .get_phy_flags = qca8k_get_phy_flags, .port_lag_join = qca8k_port_lag_join, .port_lag_leave = qca8k_port_lag_leave, - .master_state_change = qca8k_master_change, + .conduit_state_change = qca8k_conduit_change, .connect_tag_protocol = qca8k_connect_tag_protocol, }; @@ -2038,12 +2049,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->info = of_device_get_match_data(priv->dev); priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset", - GPIOD_ASIS); + GPIOD_OUT_HIGH); if (IS_ERR(priv->reset_gpio)) return PTR_ERR(priv->reset_gpio); if (priv->reset_gpio) { - gpiod_set_value_cansleep(priv->reset_gpio, 1); /* The active low duration must be greater than 10 ms * and checkpatch.pl wants 20 ms. 
*/ diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c index fce04ce12c..9243eff891 100644 --- a/drivers/net/dsa/qca/qca8k-common.c +++ b/drivers/net/dsa/qca/qca8k-common.c @@ -487,8 +487,7 @@ void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < priv->info->mib_count; i++) - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", ar8327_mib[i].name); } void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, @@ -500,7 +499,7 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, u32 hi = 0; int ret; - if (priv->mgmt_master && priv->info->ops->autocast_mib && + if (priv->mgmt_conduit && priv->info->ops->autocast_mib && priv->info->ops->autocast_mib(ds, port, data) > 0) return; @@ -762,7 +761,7 @@ int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) int ret; /* We have only have a general MTU setting. - * DSA always set the CPU port's MTU to the largest MTU of the slave + * DSA always set the CPU port's MTU to the largest MTU of the user * ports. * Setting MTU just for the CPU port is sufficient to correctly set a * value for every port. diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c index e8c16e76e3..90e30c2909 100644 --- a/drivers/net/dsa/qca/qca8k-leds.c +++ b/drivers/net/dsa/qca/qca8k-leds.c @@ -356,8 +356,8 @@ static struct device *qca8k_cled_hw_control_get_device(struct led_classdev *ldev dp = dsa_to_port(priv->ds, qca8k_phy_to_port(led->port_num)); if (!dp) return NULL; - if (dp->slave) - return &dp->slave->dev; + if (dp->user) + return &dp->user->dev; return NULL; } @@ -429,7 +429,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p init_data.default_label = ":port"; init_data.fwnode = led; init_data.devname_mandatory = true; - init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id, + init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->user_mii_bus->id, port_num); if (!init_data.devicename) return -ENOMEM; diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h index 8f88b7db38..2ac7e88f8d 100644 --- a/drivers/net/dsa/qca/qca8k.h +++ b/drivers/net/dsa/qca/qca8k.h @@ -458,7 +458,7 @@ struct qca8k_priv { struct mutex reg_mutex; struct device *dev; struct gpio_desc *reset_gpio; - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ + struct net_device *mgmt_conduit; /* Track if mdio/mib Ethernet is available */ struct qca8k_mgmt_eth_data mgmt_eth_data; struct qca8k_mib_eth_data mib_eth_data; struct qca8k_mdio_cache mdio_cache; diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index ff13563059..755546ed8d 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -378,25 +378,25 @@ static int realtek_smi_setup_mdio(struct dsa_switch *ds) return -ENODEV; } - priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev); - if (!priv->slave_mii_bus) { + priv->user_mii_bus = devm_mdiobus_alloc(priv->dev); + if (!priv->user_mii_bus) { ret = -ENOMEM; goto err_put_node; } - priv->slave_mii_bus->priv = priv; - priv->slave_mii_bus->name = "SMI slave MII"; - priv->slave_mii_bus->read = realtek_smi_mdio_read; - priv->slave_mii_bus->write = realtek_smi_mdio_write; - snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", + priv->user_mii_bus->priv = priv; + priv->user_mii_bus->name = "SMI user MII"; + priv->user_mii_bus->read = 
realtek_smi_mdio_read; + priv->user_mii_bus->write = realtek_smi_mdio_write; + snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); - priv->slave_mii_bus->dev.of_node = mdio_np; - priv->slave_mii_bus->parent = priv->dev; - ds->slave_mii_bus = priv->slave_mii_bus; + priv->user_mii_bus->dev.of_node = mdio_np; + priv->user_mii_bus->parent = priv->dev; + ds->user_mii_bus = priv->user_mii_bus; - ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np); + ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np); if (ret) { dev_err(priv->dev, "unable to register MDIO bus %s\n", - priv->slave_mii_bus->id); + priv->user_mii_bus->id); goto err_put_node; } @@ -506,22 +506,20 @@ static int realtek_smi_probe(struct platform_device *pdev) return 0; } -static int realtek_smi_remove(struct platform_device *pdev) +static void realtek_smi_remove(struct platform_device *pdev) { struct realtek_priv *priv = platform_get_drvdata(pdev); if (!priv) - return 0; + return; dsa_unregister_switch(priv->ds); - if (priv->slave_mii_bus) - of_node_put(priv->slave_mii_bus->dev.of_node); + if (priv->user_mii_bus) + of_node_put(priv->user_mii_bus->dev.of_node); /* leave the device reset asserted */ if (priv->reset) gpiod_set_value(priv->reset, 1); - - return 0; } static void realtek_smi_shutdown(struct platform_device *pdev) @@ -559,7 +557,7 @@ static struct platform_driver realtek_smi_driver = { .of_match_table = realtek_smi_of_match, }, .probe = realtek_smi_probe, - .remove = realtek_smi_remove, + .remove_new = realtek_smi_remove, .shutdown = realtek_smi_shutdown, }; module_platform_driver(realtek_smi_driver); diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h index 4fa7c6ba87..790488e9c6 100644 --- a/drivers/net/dsa/realtek/realtek.h +++ b/drivers/net/dsa/realtek/realtek.h @@ -54,7 +54,7 @@ struct realtek_priv { struct regmap *map; struct regmap *map_nolock; struct mutex map_lock; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; struct mii_bus *bus; int mdio_addr; diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 41ea3b5a42..0875e4fc9f 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1144,7 +1144,7 @@ static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port, int frame_size; /* When a new MTU is set, DSA always sets the CPU port's MTU to the - * largest MTU of the slave ports. Because the switch only has a global + * largest MTU of the user ports. Because the switch only has a global * RX length register, only allowing CPU port here is enough. 
*/ if (!dsa_is_cpu_port(ds, port)) @@ -1303,8 +1303,7 @@ static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset for (i = 0; i < RTL8365MB_MIB_END; i++) { struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; - - strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", mib->name); } } diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c index dc5f75be30..82e267b8fd 100644 --- a/drivers/net/dsa/realtek/rtl8366-core.c +++ b/drivers/net/dsa/realtek/rtl8366-core.c @@ -395,17 +395,13 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { struct realtek_priv *priv = ds->priv; - struct rtl8366_mib_counter *mib; int i; if (port >= priv->num_ports) return; - for (i = 0; i < priv->num_mib_counters; i++) { - mib = &priv->mib_counters[i]; - strncpy(data + i * ETH_GSTRING_LEN, - mib->name, ETH_GSTRING_LEN); - } + for (i = 0; i < priv->num_mib_counters; i++) + ethtool_sprintf(&data, "%s", priv->mib_counters[i].name); } EXPORT_SYMBOL_GPL(rtl8366_get_strings); diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 7868ef237f..b39b719a5b 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -95,12 +95,6 @@ #define RTL8366RB_PAACR_RX_PAUSE BIT(6) #define RTL8366RB_PAACR_AN BIT(7) -#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \ - RTL8366RB_PAACR_FULL_DUPLEX | \ - RTL8366RB_PAACR_LINK_UP | \ - RTL8366RB_PAACR_TX_PAUSE | \ - RTL8366RB_PAACR_RX_PAUSE) - /* bits 0..7 = port 0, bits 8..15 = port 1 */ #define RTL8366RB_PSTAT0 0x0014 /* bits 0..7 = port 2, bits 8..15 = port 3 */ @@ -1081,29 +1075,61 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, int speed, int duplex, bool tx_pause, bool rx_pause) { struct realtek_priv *priv = ds->priv; + unsigned int val; int ret; + /* Allow forcing the mode on the fixed CPU port, no autonegotiation. + * We assume autonegotiation works on the PHY-facing ports. 
+ */ if (port != priv->cpu_port) return; dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port); - /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG, BIT(port), BIT(port)); if (ret) { - dev_err(priv->dev, "failed to force 1Gbit on CPU port\n"); + dev_err(priv->dev, "failed to force CPU port\n"); return; } + /* Conjure port config */ + switch (speed) { + case SPEED_10: + val = RTL8366RB_PAACR_SPEED_10M; + break; + case SPEED_100: + val = RTL8366RB_PAACR_SPEED_100M; + break; + case SPEED_1000: + val = RTL8366RB_PAACR_SPEED_1000M; + break; + default: + val = RTL8366RB_PAACR_SPEED_1000M; + break; + } + + if (duplex == DUPLEX_FULL) + val |= RTL8366RB_PAACR_FULL_DUPLEX; + + if (tx_pause) + val |= RTL8366RB_PAACR_TX_PAUSE; + + if (rx_pause) + val |= RTL8366RB_PAACR_RX_PAUSE; + + val |= RTL8366RB_PAACR_LINK_UP; + ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2, 0xFF00U, - RTL8366RB_PAACR_CPU_PORT << 8); + val << 8); if (ret) { dev_err(priv->dev, "failed to set PAACR on CPU port\n"); return; } + dev_dbg(priv->dev, "set PAACR to %04x\n", val); + /* Enable the CPU port */ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port), 0); diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 2eda10b33f..10092ea85e 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -1272,19 +1272,17 @@ free_pcs: return ret; } -static int a5psw_remove(struct platform_device *pdev) +static void a5psw_remove(struct platform_device *pdev) { struct a5psw *a5psw = platform_get_drvdata(pdev); if (!a5psw) - return 0; + return; dsa_unregister_switch(&a5psw->ds); a5psw_pcs_free(a5psw); clk_disable_unprepare(a5psw->hclk); clk_disable_unprepare(a5psw->clk); - - return 0; } static void a5psw_shutdown(struct platform_device *pdev) @@ -1311,7 +1309,7 @@ static struct platform_driver a5psw_driver = { .of_match_table = a5psw_of_mtable, }, .probe = a5psw_probe, - .remove = a5psw_remove, + .remove_new = a5psw_remove, .shutdown = a5psw_shutdown, }; module_platform_driver(a5psw_driver); diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c index e3699f76f6..08a3e7b962 100644 --- a/drivers/net/dsa/sja1105/sja1105_clocking.c +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c @@ -153,14 +153,14 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv, { const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_tx_clk; - const int mac_clk_sources[] = { + static const int mac_clk_sources[] = { CLKSRC_MII0_TX_CLK, CLKSRC_MII1_TX_CLK, CLKSRC_MII2_TX_CLK, CLKSRC_MII3_TX_CLK, CLKSRC_MII4_TX_CLK, }; - const int phy_clk_sources[] = { + static const int phy_clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -194,7 +194,7 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_rx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_MII0_RX_CLK, CLKSRC_MII1_RX_CLK, CLKSRC_MII2_RX_CLK, @@ -221,7 +221,7 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_ext_tx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -248,7 +248,7 @@ 
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_ext_rx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -349,8 +349,13 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv, if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) { clksrc = CLKSRC_PLL0; } else { - int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, - CLKSRC_IDIV3, CLKSRC_IDIV4}; + static const int clk_sources[] = { + CLKSRC_IDIV0, + CLKSRC_IDIV1, + CLKSRC_IDIV2, + CLKSRC_IDIV3, + CLKSRC_IDIV4, + }; clksrc = clk_sources[port]; } @@ -638,7 +643,7 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv, const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl ref_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_MII0_TX_CLK, CLKSRC_MII1_TX_CLK, CLKSRC_MII2_TX_CLK, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 1a367e64bc..74cee39d73 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2688,7 +2688,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, } /* Transfer skb to the host port. */ - dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); + dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); /* Wait until the switch has processed the frame */ do { @@ -3081,7 +3081,7 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, * ref_clk pin. So port clocking needs to be initialized early, before * connecting to PHYs is attempted, otherwise they won't respond through MDIO. * Setting correct PHY link speed does not matter now. - * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY + * But dsa_user_phy_setup is called later than sja1105_setup, so the PHY * bindings are not yet parsed by DSA core. We need to parse early so that we * can populate the xMII mode parameters table. */ diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index c99fb1bd4c..571f037fe6 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -928,7 +928,8 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, const struct vsc73xx_counter *cnt; struct vsc73xx *vsc = ds->priv; u8 indices[6]; - int i, j; + u8 *buf = data; + int i; u32 val; int ret; @@ -948,10 +949,7 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */ /* The first counters is the RX octets */ - j = 0; - strncpy(data + j * ETH_GSTRING_LEN, - "RxEtherStatsOctets", ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "RxEtherStatsOctets"); /* Each port supports recording 3 RX counters and 3 TX counters, * figure out what counters we use in this set-up and return the @@ -961,23 +959,16 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, */ for (i = 0; i < 3; i++) { cnt = vsc73xx_find_counter(vsc, indices[i], false); - if (cnt) - strncpy(data + j * ETH_GSTRING_LEN, - cnt->name, ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "%s", cnt ? 
cnt->name : ""); } /* TX stats begins with the number of TX octets */ - strncpy(data + j * ETH_GSTRING_LEN, - "TxEtherStatsOctets", ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "TxEtherStatsOctets"); for (i = 3; i < 6; i++) { cnt = vsc73xx_find_counter(vsc, indices[i], true); - if (cnt) - strncpy(data + j * ETH_GSTRING_LEN, - cnt->name, ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "%s", cnt ? cnt->name : ""); + } } @@ -1037,6 +1028,31 @@ static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port) return 9600 - ETH_HLEN - ETH_FCS_LEN; } +static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port, + struct phylink_config *config) +{ + unsigned long *interfaces = config->supported_interfaces; + + if (port == 5) + return; + + if (port == CPU_PORT) { + __set_bit(PHY_INTERFACE_MODE_MII, interfaces); + __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + __set_bit(PHY_INTERFACE_MODE_RGMII, interfaces); + } + + if (port <= 4) { + /* Internal PHYs */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces); + /* phylib default */ + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + } + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000; +} + static const struct dsa_switch_ops vsc73xx_ds_ops = { .get_tag_protocol = vsc73xx_get_tag_protocol, .setup = vsc73xx_setup, @@ -1050,6 +1066,7 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = { .port_disable = vsc73xx_port_disable, .port_change_mtu = vsc73xx_change_mtu, .port_max_mtu = vsc73xx_get_max_mtu, + .phylink_get_caps = vsc73xx_phylink_get_caps, }; static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c index bd4206e8f9..755b7895a1 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-platform.c +++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c @@ -112,16 +112,14 @@ static int vsc73xx_platform_probe(struct platform_device *pdev) return vsc73xx_probe(&vsc_platform->vsc); } -static int vsc73xx_platform_remove(struct platform_device *pdev) +static void vsc73xx_platform_remove(struct platform_device *pdev) { struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev); if (!vsc_platform) - return 0; + return; vsc73xx_remove(&vsc_platform->vsc); - - return 0; } static void vsc73xx_platform_shutdown(struct platform_device *pdev) @@ -160,7 +158,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match); static struct platform_driver vsc73xx_platform_driver = { .probe = vsc73xx_platform_probe, - .remove = vsc73xx_platform_remove, + .remove_new = vsc73xx_platform_remove, .shutdown = vsc73xx_platform_shutdown, .driver = { .name = "vsc73xx-platform", diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 753fef757f..96db032b47 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -548,12 +548,13 @@ static void xrs700x_bridge_leave(struct dsa_switch *ds, int port, } static int xrs700x_hsr_join(struct dsa_switch *ds, int port, - struct net_device *hsr) + struct net_device *hsr, + struct netlink_ext_ack *extack) { unsigned int val = XRS_HSR_CFG_HSR_PRP; struct dsa_port *partner = NULL, *dp; struct xrs700x *priv = ds->priv; - struct net_device *slave; + struct net_device *user; int ret, i, hsr_pair[2]; enum hsr_version ver; bool fwd = false; @@ -562,16 +563,21 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, if (ret) return ret; - /* Only ports 1 and 2 can be HSR/PRP redundant ports. 
*/ - if (port != 1 && port != 2) + if (port != 1 && port != 2) { + NL_SET_ERR_MSG_MOD(extack, + "Only ports 1 and 2 can offload HSR/PRP"); return -EOPNOTSUPP; + } - if (ver == HSR_V1) + if (ver == HSR_V1) { val |= XRS_HSR_CFG_HSR; - else if (ver == PRP_V1) + } else if (ver == PRP_V1) { val |= XRS_HSR_CFG_PRP; - else + } else { + NL_SET_ERR_MSG_MOD(extack, + "Only HSR v1 and PRP v1 can be offloaded"); return -EOPNOTSUPP; + } dsa_hsr_foreach_port(dp, ds, hsr) { if (dp->index != port) { @@ -632,8 +638,8 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, hsr_pair[0] = port; hsr_pair[1] = partner->index; for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) { - slave = dsa_to_port(ds, hsr_pair[i])->slave; - slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES; + user = dsa_to_port(ds, hsr_pair[i])->user; + user->features |= XRS7000X_SUPPORTED_HSR_FEATURES; } return 0; @@ -644,7 +650,7 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, { struct dsa_port *partner = NULL, *dp; struct xrs700x *priv = ds->priv; - struct net_device *slave; + struct net_device *user; int i, hsr_pair[2]; unsigned int val; @@ -686,8 +692,8 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, hsr_pair[0] = port; hsr_pair[1] = partner->index; for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) { - slave = dsa_to_port(ds, hsr_pair[i])->slave; - slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES; + user = dsa_to_port(ds, hsr_pair[i])->user; + user->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES; } return 0; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index c4b1b0aa43..768454aa36 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -202,4 +202,5 @@ static void __exit dummy_cleanup_module(void) module_init(dummy_init_module); module_exit(dummy_cleanup_module); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Dummy netdevice driver which discards all packets sent to it"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/eql.c b/drivers/net/eql.c index ca3e4700a8..3c2efda916 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -607,4 +607,5 @@ static void __exit eql_cleanup_module(void) module_init(eql_init_module); module_exit(eql_cleanup_module); +MODULE_DESCRIPTION("Equalizer Load-balancer for serial network interfaces"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index af603256b7..2874680ef2 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -811,7 +811,7 @@ static int ax_init_dev(struct net_device *dev) return ret; } -static int ax_remove(struct platform_device *pdev) +static void ax_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ei_device *ei_local = netdev_priv(dev); @@ -832,8 +832,6 @@ static int ax_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); free_netdev(dev); - - return 0; } /* @@ -1011,7 +1009,7 @@ static struct platform_driver axdrv = { .name = "ax88796", }, .probe = ax_probe, - .remove = ax_remove, + .remove_new = ax_remove, .suspend = ax_suspend, .resume = ax_resume, }; diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 217838b282..5a0fa995e6 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -441,7 +441,7 @@ static int mcf8390_probe(struct platform_device *pdev) return 0; } -static int mcf8390_remove(struct platform_device *pdev) +static void mcf8390_remove(struct platform_device *pdev) { struct 
net_device *dev = platform_get_drvdata(pdev); struct resource *mem; @@ -450,7 +450,6 @@ static int mcf8390_remove(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); free_netdev(dev); - return 0; } static struct platform_driver mcf8390_drv = { @@ -458,7 +457,7 @@ static struct platform_driver mcf8390_drv = { .name = "mcf8390", }, .probe = mcf8390_probe, - .remove = mcf8390_remove, + .remove_new = mcf8390_remove, }; module_platform_driver(mcf8390_drv); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 7d89ec1cf2..350683a09d 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -823,7 +823,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) return 0; } -static int ne_drv_remove(struct platform_device *pdev) +static void ne_drv_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -842,7 +842,6 @@ static int ne_drv_remove(struct platform_device *pdev) release_region(dev->base_addr, NE_IO_EXTENT); free_netdev(dev); } - return 0; } /* Remove unused devices or all if true. */ @@ -895,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev) #endif static struct platform_driver ne_driver = { - .remove = ne_drv_remove, + .remove_new = ne_drv_remove, .suspend = ne_drv_suspend, .resume = ne_drv_resume, .driver = { diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index c6f8f852bf..e03193da58 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1582,15 +1582,13 @@ static int owl_emac_probe(struct platform_device *pdev) return 0; } -static int owl_emac_remove(struct platform_device *pdev) +static void owl_emac_remove(struct platform_device *pdev) { struct owl_emac_priv *priv = platform_get_drvdata(pdev); netif_napi_del(&priv->napi); phy_disconnect(priv->netdev->phydev); cancel_work_sync(&priv->mac_reset_task); - - return 0; } static const struct of_device_id owl_emac_of_match[] = { @@ -1609,7 +1607,7 @@ static struct platform_driver owl_emac_driver = { .pm = &owl_emac_pm_ops, }, .probe = owl_emac_probe, - .remove = owl_emac_remove, + .remove_new = owl_emac_remove, }; module_platform_driver(owl_emac_driver); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 597a02c75d..27af7746d6 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1525,7 +1525,7 @@ error1: return err; } -static int greth_of_remove(struct platform_device *of_dev) +static void greth_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); struct greth_private *greth = netdev_priv(ndev); @@ -1544,8 +1544,6 @@ static int greth_of_remove(struct platform_device *of_dev) of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); free_netdev(ndev); - - return 0; } static const struct of_device_id greth_of_match[] = { @@ -1566,7 +1564,7 @@ static struct platform_driver greth_of_driver = { .of_match_table = greth_of_match, }, .probe = greth_of_probe, - .remove = greth_of_remove, + .remove_new = greth_of_remove, }; module_platform_driver(greth_of_driver); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index a94c62956e..d761c08fe5 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ 
b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -1083,7 +1083,7 @@ out: return ret; } -static int emac_remove(struct platform_device *pdev) +static void emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct emac_board_info *db = netdev_priv(ndev); @@ -1101,7 +1101,6 @@ static int emac_remove(struct platform_device *pdev) free_netdev(ndev); dev_dbg(&pdev->dev, "released and freed device\n"); - return 0; } static int emac_suspend(struct platform_device *dev, pm_message_t state) @@ -1143,7 +1142,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_of_match, }, .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, .suspend = emac_suspend, .resume = emac_resume, }; diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index db5eed06e9..82f2363a45 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -472,7 +472,7 @@ struct altera_tse_private { /* ethtool msglvl option */ u32 msg_enable; - struct altera_dmaops *dmaops; + const struct altera_dmaops *dmaops; struct phylink *phylink; struct phylink_config phylink_config; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 2e15800e53..1c8763be0e 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -29,13 +29,13 @@ #include #include #include -#include +#include #include #include -#include #include #include #include +#include #include #include #include @@ -82,8 +82,6 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); #define TXQUEUESTOP_THRESHHOLD 2 -static const struct of_device_id altera_tse_ids[]; - static inline u32 tse_tx_avail(struct altera_tse_private *priv) { return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; @@ -1133,7 +1131,6 @@ static int request_and_map(struct platform_device *pdev, const char *name, */ static int altera_tse_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = NULL; struct regmap_config pcs_regmap_cfg; struct altera_tse_private *priv; struct mdio_regmap_config mrc; @@ -1159,11 +1156,7 @@ static int altera_tse_probe(struct platform_device *pdev) priv->dev = ndev; priv->msg_enable = netif_msg_init(debug, default_msg_level); - of_id = of_match_device(altera_tse_ids, &pdev->dev); - - if (of_id) - priv->dmaops = (struct altera_dmaops *)of_id->data; - + priv->dmaops = device_get_match_data(&pdev->dev); if (priv->dmaops && priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { @@ -1464,7 +1457,7 @@ err_free_netdev: /* Remove Altera TSE MAC device */ -static int altera_tse_remove(struct platform_device *pdev) +static void altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct altera_tse_private *priv = netdev_priv(ndev); @@ -1476,8 +1469,6 @@ static int altera_tse_remove(struct platform_device *pdev) lynx_pcs_destroy(priv->pcs); free_netdev(ndev); - - return 0; } static const struct altera_dmaops altera_dtype_sgdma = { @@ -1528,7 +1519,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids); static struct platform_driver altera_tse_driver = { .probe = altera_tse_probe, - .remove = altera_tse_remove, + .remove_new = altera_tse_remove, .suspend = NULL, .resume = NULL, .driver = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 14e41eb577..c44c44e26d 100644 --- 
a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1827,7 +1827,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, } if (xdp_flags & ENA_XDP_REDIRECT) - xdp_do_flush_map(); + xdp_do_flush(); return work_done; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c5cec4e794..85c978149b 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1323,7 +1323,7 @@ out: return err; } -static int au1000_remove(struct platform_device *pdev) +static void au1000_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct au1000_private *aup = netdev_priv(dev); @@ -1359,13 +1359,11 @@ static int au1000_remove(struct platform_device *pdev) release_mem_region(macen->start, resource_size(macen)); free_netdev(dev); - - return 0; } static struct platform_driver au1000_eth_driver = { .probe = au1000_probe, - .remove = au1000_remove, + .remove_new = au1000_remove, .driver = { .name = "au1000-eth", }, diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 5beadabc21..ea773cfa0a 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq) return nq_work; } +static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc) +{ + if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) || + pdsc->state & BIT_ULL(PDSC_S_FW_DEAD)) + return false; + + return refcount_inc_not_zero(&pdsc->adminq_refcnt); +} + void pdsc_process_adminq(struct pdsc_qcq *qcq) { union pds_core_adminq_comp *comp; @@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) int aq_work = 0; int credits; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return; } @@ -124,6 +133,7 @@ credits: pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx], credits, PDS_CORE_INTR_CRED_REARM); + refcount_dec(&pdsc->adminq_refcnt); } void pdsc_work_thread(struct work_struct *work) @@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work) irqreturn_t pdsc_adminq_isr(int irq, void *data) { - struct pdsc_qcq *qcq = data; - struct pdsc *pdsc = qcq->pdsc; + struct pdsc *pdsc = data; + struct pdsc_qcq *qcq; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return IRQ_HANDLED; } + qcq = &pdsc->adminqcq; queue_work(pdsc->wq, &qcq->work); pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR); + refcount_dec(&pdsc->adminq_refcnt); return IRQ_HANDLED; } @@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc, /* Check that the FW is running */ if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", - __func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + 
dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_info(pdsc->dev, "%s: post failed - BARs not setup\n", + __func__); + } ret = -ENXIO; goto err_out_unlock; @@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc, int err = 0; int index; + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n", + __func__, cmd->opcode); + return -ENXIO; + } + wc.qcq = &pdsc->adminqcq; index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc); if (index < 0) { @@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc, break; if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", - __func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n", + __func__); + } err = -ENXIO; break; } @@ -285,6 +315,8 @@ err_out: queue_work(pdsc->wq, &pdsc->health_work); } + refcount_dec(&pdsc->adminq_refcnt); + return err; } EXPORT_SYMBOL_GPL(pdsc_adminq_post); diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index 36f9b932b9..7658a72867 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ b/drivers/net/ethernet/amd/pds_core/core.c @@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq) snprintf(name, sizeof(name), "%s-%d-%s", PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name); - index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq); + index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc); if (index < 0) return index; qcq->intx = index; @@ -152,11 +152,8 @@ void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq) dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); - if (qcq->cq.info) - vfree(qcq->cq.info); - - if (qcq->q.info) - vfree(qcq->q.info); + vfree(qcq->cq.info); + vfree(qcq->q.info); memset(qcq, 0, sizeof(*qcq)); } @@ -407,10 +404,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init) int numdescs; int err; - if (init) - err = pdsc_dev_init(pdsc); - else - err = pdsc_dev_reinit(pdsc); + err = pdsc_dev_init(pdsc); if (err) return err; @@ -445,13 +439,15 @@ int pdsc_setup(struct pdsc *pdsc, bool init) goto err_out_teardown; /* Set up the VIFs */ - err = pdsc_viftypes_init(pdsc); - if (err) - goto err_out_teardown; + if (init) { + err = pdsc_viftypes_init(pdsc); + if (err) + goto err_out_teardown; - if (init) pdsc_debugfs_add_viftype(pdsc); + } + refcount_set(&pdsc->adminq_refcnt, 1); clear_bit(PDSC_S_FW_DEAD, &pdsc->state); return 0; @@ -466,20 +462,23 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) if (!pdsc->pdev->is_virtfn) pdsc_devcmd_reset(pdsc); + if (pdsc->adminqcq.work.func) + cancel_work_sync(&pdsc->adminqcq.work); pdsc_qcq_free(pdsc, &pdsc->notifyqcq); pdsc_qcq_free(pdsc, &pdsc->adminqcq); - kfree(pdsc->viftype_status); - pdsc->viftype_status = NULL; + if (removing) { + kfree(pdsc->viftype_status); + pdsc->viftype_status = NULL; + } if (pdsc->intr_info) { for (i = 0; i < pdsc->nintrs; i++) pdsc_intr_free(pdsc, i); - if (removing) { - kfree(pdsc->intr_info); - pdsc->intr_info = NULL; - } + kfree(pdsc->intr_info); + pdsc->intr_info = NULL; + pdsc->nintrs = 0; } if (pdsc->kern_dbpage) { @@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) 
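/*
 * Illustrative sketch, not from this patch: a minimal example of the
 * refcount-guard pattern the pds_core adminq changes in this area rely on.
 * Requesters take a reference with refcount_inc_not_zero() before touching
 * the queue and drop it when done; teardown sets a "dead" flag first (so no
 * new references can be taken) and then spins with refcount_dec_if_one()
 * until it is the last holder.  All names here (struct foo_dev, foo_*) are
 * hypothetical, not identifiers from the driver.
 */
#include <linux/refcount.h>
#include <linux/processor.h>
#include <linux/types.h>

struct foo_dev {
	refcount_t adminq_refcnt;	/* set to 1 when the queue is usable */
	bool fw_dead;
};

static bool foo_adminq_get(struct foo_dev *fd)
{
	if (fd->fw_dead)
		return false;
	return refcount_inc_not_zero(&fd->adminq_refcnt);
}

static void foo_adminq_put(struct foo_dev *fd)
{
	refcount_dec(&fd->adminq_refcnt);
}

static void foo_adminq_wait_unused(struct foo_dev *fd)
{
	/* fw_dead is already set, so the refcount can only go down */
	while (!refcount_dec_if_one(&fd->adminq_refcnt))
		cpu_relax();
}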
pdsc->kern_dbpage = NULL; } + pci_free_irq_vectors(pdsc->pdev); set_bit(PDSC_S_FW_DEAD, &pdsc->state); } @@ -512,7 +512,25 @@ void pdsc_stop(struct pdsc *pdsc) PDS_CORE_INTR_MASK_SET); } -static void pdsc_fw_down(struct pdsc *pdsc) +static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc) +{ + /* The driver initializes the adminq_refcnt to 1 when the adminq is + * allocated and ready for use. Other users/requesters will increment + * the refcnt while in use. If the refcnt is down to 1 then the adminq + * is not in use and the refcnt can be cleared and adminq freed. Before + * calling this function the driver will set PDSC_S_FW_DEAD, which + * prevent subsequent attempts to use the adminq and increment the + * refcnt to fail. This guarantees that this function will eventually + * exit. + */ + while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) { + dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n", + __func__); + cpu_relax(); + } +} + +void pdsc_fw_down(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { .reset.ecode = cpu_to_le16(PDS_EVENT_RESET), @@ -520,10 +538,15 @@ static void pdsc_fw_down(struct pdsc *pdsc) }; if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) { - dev_err(pdsc->dev, "%s: already happening\n", __func__); + dev_warn(pdsc->dev, "%s: already happening\n", __func__); return; } + if (pdsc->pdev->is_virtfn) + return; + + pdsc_adminq_wait_and_dec_once_unused(pdsc); + /* Notify clients of fw_down */ if (pdsc->fw_reporter) devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc); @@ -533,7 +556,7 @@ static void pdsc_fw_down(struct pdsc *pdsc) pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY); } -static void pdsc_fw_up(struct pdsc *pdsc) +void pdsc_fw_up(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { .reset.ecode = cpu_to_le16(PDS_EVENT_RESET), @@ -546,6 +569,11 @@ static void pdsc_fw_up(struct pdsc *pdsc) return; } + if (pdsc->pdev->is_virtfn) { + clear_bit(PDSC_S_FW_DEAD, &pdsc->state); + return; + } + err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY); if (err) goto err_out; @@ -567,6 +595,24 @@ err_out: pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY); } +static void pdsc_check_pci_health(struct pdsc *pdsc) +{ + u8 fw_status; + + /* some sort of teardown already in progress */ + if (!pdsc->info_regs) + return; + + fw_status = ioread8(&pdsc->info_regs->fw_status); + + /* is PCI broken? 
*/ + if (fw_status != PDS_RC_BAD_PCI) + return; + + pdsc_reset_prepare(pdsc->pdev); + pdsc_reset_done(pdsc->pdev); +} + void pdsc_health_thread(struct work_struct *work) { struct pdsc *pdsc = container_of(work, struct pdsc, health_work); @@ -593,6 +639,8 @@ void pdsc_health_thread(struct work_struct *work) pdsc_fw_down(pdsc); } + pdsc_check_pci_health(pdsc); + pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; out_unlock: diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index b1c1f1007b..110c4b826b 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -184,6 +184,7 @@ struct pdsc { struct mutex devcmd_lock; /* lock for dev_cmd operations */ struct mutex config_lock; /* lock for configuration operations */ spinlock_t adminq_lock; /* lock for adminq operations */ + refcount_t adminq_refcnt; struct pds_core_dev_info_regs __iomem *info_regs; struct pds_core_dev_cmd_regs __iomem *cmd_regs; struct pds_core_intr __iomem *intr_ctrl; @@ -280,9 +281,11 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, union pds_core_dev_comp *comp, int max_seconds); int pdsc_devcmd_init(struct pdsc *pdsc); int pdsc_devcmd_reset(struct pdsc *pdsc); -int pdsc_dev_reinit(struct pdsc *pdsc); int pdsc_dev_init(struct pdsc *pdsc); +void pdsc_reset_prepare(struct pci_dev *pdev); +void pdsc_reset_done(struct pci_dev *pdev); + int pdsc_intr_alloc(struct pdsc *pdsc, char *name, irq_handler_t handler, void *data); void pdsc_intr_free(struct pdsc *pdsc, int index); @@ -309,4 +312,8 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data); int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw, struct netlink_ext_ack *extack); + +void pdsc_fw_down(struct pdsc *pdsc); +void pdsc_fw_up(struct pdsc *pdsc); + #endif /* _PDSC_H_ */ diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index 8ec392299b..4e8579ca1c 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity); void pdsc_debugfs_add_ident(struct pdsc *pdsc) { + /* This file will already exist in the reset flow */ + if (debugfs_lookup("identity", pdsc->dentry)) + return; + debugfs_create_file("identity", 0400, pdsc->dentry, pdsc, &identity_fops); } diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index eb178728ed..e65a1632df 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ b/drivers/net/ethernet/amd/pds_core/dev.c @@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code) return -ERANGE; case PDS_RC_BAD_ADDR: return -EFAULT; + case PDS_RC_BAD_PCI: + return -ENXIO; case PDS_RC_EOPCODE: case PDS_RC_EINTR: case PDS_RC_DEV_CMD: @@ -55,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code) bool pdsc_is_fw_running(struct pdsc *pdsc) { + if (!pdsc->info_regs) + return false; + pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status); pdsc->last_fw_time = jiffies; pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat); @@ -62,7 +67,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc) /* Firmware is useful only if the running bit is set and * fw_status != 0xff (bad PCI read) */ - return (pdsc->fw_status != 0xff) && + return (pdsc->fw_status != PDS_RC_BAD_PCI) && (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING); } @@ -128,6 +133,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) unsigned long 
max_wait; unsigned long duration; int timeout = 0; + bool running; int done = 0; int err = 0; int status; @@ -136,6 +142,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) max_wait = start_time + (max_seconds * HZ); while (!done && !timeout) { + running = pdsc_is_fw_running(pdsc); + if (!running) + break; + done = pdsc_devcmd_done(pdsc); if (done) break; @@ -152,7 +162,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) dev_dbg(dev, "DEVCMD %d %s after %ld secs\n", opcode, pdsc_devcmd_str(opcode), duration / HZ); - if (!done || timeout) { + if ((!done || timeout) && running) { dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n", opcode, pdsc_devcmd_str(opcode), done, timeout, max_seconds); @@ -175,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, { int err; + if (!pdsc->cmd_regs) + return -ENXIO; + memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd)); pdsc_devcmd_dbell(pdsc); err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds); - memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq) queue_work(pdsc->wq, &pdsc->health_work); + else + memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); return err; } @@ -302,13 +316,6 @@ static int pdsc_identify(struct pdsc *pdsc) return 0; } -int pdsc_dev_reinit(struct pdsc *pdsc) -{ - pdsc_init_devinfo(pdsc); - - return pdsc_identify(pdsc); -} - int pdsc_dev_init(struct pdsc *pdsc) { unsigned int nintrs; diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index d2abf32b93..54864f27c8 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, mutex_lock(&pdsc->devcmd_lock); err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2); - memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); + if (!err) + memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); mutex_unlock(&pdsc->devcmd_lock); if (err && err != -EIO) return err; @@ -124,6 +125,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, snprintf(buf, sizeof(buf), "fw.slot_%d", i); err = devlink_info_version_stored_put(req, buf, fw_list.fw_names[i].fw_version); + if (err) + return err; } err = devlink_info_version_running_put(req, @@ -154,33 +157,20 @@ int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct pdsc *pdsc = devlink_health_reporter_priv(reporter); - int err; mutex_lock(&pdsc->config_lock); - if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) - err = devlink_fmsg_string_pair_put(fmsg, "Status", "dead"); + devlink_fmsg_string_pair_put(fmsg, "Status", "dead"); else if (!pdsc_is_fw_good(pdsc)) - err = devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy"); + devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy"); else - err = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); - + devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); mutex_unlock(&pdsc->config_lock); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "State", - pdsc->fw_status & - ~PDS_CORE_FW_STS_F_GENERATION); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "Generation", - pdsc->fw_generation >> 4); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "State", + pdsc->fw_status & 
~PDS_CORE_FW_STS_F_GENERATION); + devlink_fmsg_u32_pair_put(fmsg, "Generation", pdsc->fw_generation >> 4); + devlink_fmsg_u32_pair_put(fmsg, "Recoveries", pdsc->fw_recoveries); - return devlink_fmsg_u32_pair_put(fmsg, "Recoveries", - pdsc->fw_recoveries); + return 0; } diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c index 90a811f387..fa626719e6 100644 --- a/drivers/net/ethernet/amd/pds_core/fw.c +++ b/drivers/net/ethernet/amd/pds_core/fw.c @@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw, dev_info(pdsc->dev, "Installing firmware\n"); + if (!pdsc->cmd_regs) + return -ENXIO; + dl = priv_to_devlink(pdsc); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c index 3a45bf474a..cdbf053b53 100644 --- a/drivers/net/ethernet/amd/pds_core/main.c +++ b/drivers/net/ethernet/amd/pds_core/main.c @@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc) struct pdsc_dev_bar *bars = pdsc->bars; unsigned int i; + pdsc->info_regs = NULL; + pdsc->cmd_regs = NULL; + pdsc->intr_status = NULL; + pdsc->intr_ctrl = NULL; + for (i = 0; i < PDS_CORE_BARS_MAX; i++) { if (bars[i].vaddr) pci_iounmap(pdsc->pdev, bars[i].vaddr); @@ -293,7 +298,7 @@ err_out_stop: err_out_teardown: pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING); err_out_unmap_bars: - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); mutex_destroy(&pdsc->config_lock); @@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev) */ pdsc_sriov_configure(pdev, 0); - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); @@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev) mutex_destroy(&pdsc->config_lock); mutex_destroy(&pdsc->devcmd_lock); - pci_free_irq_vectors(pdev); pdsc_unmap_bars(pdsc); pci_release_regions(pdev); } @@ -445,12 +449,76 @@ static void pdsc_remove(struct pci_dev *pdev) devlink_free(dl); } +static void pdsc_stop_health_thread(struct pdsc *pdsc) +{ + timer_shutdown_sync(&pdsc->wdtimer); + if (pdsc->health_work.func) + cancel_work_sync(&pdsc->health_work); +} + +static void pdsc_restart_health_thread(struct pdsc *pdsc) +{ + timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0); + mod_timer(&pdsc->wdtimer, jiffies + 1); +} + +void pdsc_reset_prepare(struct pci_dev *pdev) +{ + struct pdsc *pdsc = pci_get_drvdata(pdev); + + pdsc_stop_health_thread(pdsc); + pdsc_fw_down(pdsc); + + pdsc_unmap_bars(pdsc); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +void pdsc_reset_done(struct pci_dev *pdev) +{ + struct pdsc *pdsc = pci_get_drvdata(pdev); + struct device *dev = pdsc->dev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err)); + return; + } + pci_set_master(pdev); + + if (!pdev->is_virtfn) { + pcie_print_link_status(pdsc->pdev); + + err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME); + if (err) { + dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n", + ERR_PTR(err)); + return; + } + + err = pdsc_map_bars(pdsc); + if (err) + return; + } + + pdsc_fw_up(pdsc); + pdsc_restart_health_thread(pdsc); +} + +static const struct pci_error_handlers pdsc_err_handler = { + /* FLR handling */ + .reset_prepare = pdsc_reset_prepare, + .reset_done = pdsc_reset_done, +}; + static struct pci_driver pdsc_driver = { .name = 
PDS_CORE_DRV_NAME, .id_table = pdsc_id_table, .probe = pdsc_probe, .remove = pdsc_remove, .sriov_configure = pdsc_sriov_configure, + .err_handler = &pdsc_err_handler, }; void *pdsc_get_pf_struct(struct pci_dev *vf_pdev) diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 33bb539ad7..c78706d21a 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1487,7 +1487,7 @@ static int sunlance_sbus_probe(struct platform_device *op) return err; } -static int sunlance_sbus_remove(struct platform_device *op) +static void sunlance_sbus_remove(struct platform_device *op) { struct lance_private *lp = platform_get_drvdata(op); struct net_device *net_dev = lp->dev; @@ -1497,8 +1497,6 @@ static int sunlance_sbus_remove(struct platform_device *op) lance_free_hwresources(lp); free_netdev(net_dev); - - return 0; } static const struct of_device_id sunlance_sbus_match[] = { @@ -1516,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = { .of_match_table = sunlance_sbus_match, }, .probe = sunlance_sbus_probe, - .remove = sunlance_sbus_remove, + .remove_new = sunlance_sbus_remove, }; module_platform_driver(sunlance_sbus_driver); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4d790a89fe..9131020d06 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -123,9 +123,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -135,17 +133,6 @@ #include "xgbe-common.h" #ifdef CONFIG_ACPI -static const struct acpi_device_id xgbe_acpi_match[]; - -static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata) -{ - const struct acpi_device_id *id; - - id = acpi_match_device(xgbe_acpi_match, pdata->dev); - - return id ? (struct xgbe_version_data *)id->driver_data : NULL; -} - static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; @@ -173,11 +160,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata) return 0; } #else /* CONFIG_ACPI */ -static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata) -{ - return NULL; -} - static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { return -EINVAL; @@ -185,17 +167,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata) #endif /* CONFIG_ACPI */ #ifdef CONFIG_OF -static const struct of_device_id xgbe_of_match[]; - -static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata) -{ - const struct of_device_id *id; - - id = of_match_device(xgbe_of_match, pdata->dev); - - return id ? (struct xgbe_version_data *)id->data : NULL; -} - static int xgbe_of_support(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; @@ -244,11 +215,6 @@ static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata) return phy_pdev; } #else /* CONFIG_OF */ -static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata) -{ - return NULL; -} - static int xgbe_of_support(struct xgbe_prv_data *pdata) { return -EINVAL; @@ -290,12 +256,6 @@ static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata) return phy_pdev; } -static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata) -{ - return pdata->use_acpi ? 
xgbe_acpi_vdata(pdata) - : xgbe_of_vdata(pdata); -} - static int xgbe_platform_probe(struct platform_device *pdev) { struct xgbe_prv_data *pdata; @@ -321,7 +281,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) pdata->use_acpi = dev->of_node ? 0 : 1; /* Get the version data */ - pdata->vdata = xgbe_get_vdata(pdata); + pdata->vdata = (struct xgbe_version_data *)device_get_match_data(dev); phy_pdev = xgbe_get_phy_pdev(pdata); if (!phy_pdev) { @@ -512,7 +472,7 @@ err_alloc: return ret; } -static int xgbe_platform_remove(struct platform_device *pdev) +static void xgbe_platform_remove(struct platform_device *pdev) { struct xgbe_prv_data *pdata = platform_get_drvdata(pdev); @@ -521,8 +481,6 @@ static int xgbe_platform_remove(struct platform_device *pdev) platform_device_put(pdata->phy_platdev); xgbe_free_pdata(pdata); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -615,7 +573,7 @@ static struct platform_driver xgbe_driver = { .pm = &xgbe_platform_pm_ops, }, .probe = xgbe_platform_probe, - .remove = xgbe_platform_remove, + .remove_new = xgbe_platform_remove, }; int xgbe_platform_init(void) diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 379d19d18d..9e90c23814 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -690,7 +690,7 @@ err: return ret; } -static int xge_remove(struct platform_device *pdev) +static void xge_remove(struct platform_device *pdev) { struct xge_pdata *pdata; struct net_device *ndev; @@ -706,8 +706,6 @@ static int xge_remove(struct platform_device *pdev) xge_mdio_remove(ndev); unregister_netdev(ndev); free_netdev(ndev); - - return 0; } static void xge_shutdown(struct platform_device *pdev) @@ -736,7 +734,7 @@ static struct platform_driver xge_driver = { .acpi_match_table = ACPI_PTR(xge_acpi_match), }, .probe = xge_probe, - .remove = xge_remove, + .remove_new = xge_remove, .shutdown = xge_shutdown, }; module_platform_driver(xge_driver); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index f3305c434c..44900026d1 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -2018,7 +2018,6 @@ static int xgene_enet_probe(struct platform_device *pdev) struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; void (*link_state)(struct work_struct *); - const struct of_device_id *of_id; int ret; ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), @@ -2039,19 +2038,7 @@ static int xgene_enet_probe(struct platform_device *pdev) NETIF_F_GRO | NETIF_F_SG; - of_id = of_match_device(xgene_enet_of_match, &pdev->dev); - if (of_id) { - pdata->enet_id = (uintptr_t)of_id->data; - } -#ifdef CONFIG_ACPI - else { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev); - if (acpi_id) - pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data; - } -#endif + pdata->enet_id = (enum xgene_enet_id)device_get_match_data(&pdev->dev); if (!pdata->enet_id) { ret = -ENODEV; goto err; @@ -2127,7 +2114,7 @@ err: return ret; } -static int xgene_enet_remove(struct platform_device *pdev) +static void xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; struct net_device *ndev; @@ -2149,8 +2136,6 @@ static int xgene_enet_remove(struct platform_device *pdev) xgene_enet_delete_desc_rings(pdata); pdata->port_ops->shutdown(pdata); free_netdev(ndev); - - return 0; } static void 
xgene_enet_shutdown(struct platform_device *pdev) @@ -2174,7 +2159,7 @@ static struct platform_driver xgene_enet_driver = { .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), }, .probe = xgene_enet_probe, - .remove = xgene_enet_remove, + .remove_new = xgene_enet_remove, .shutdown = xgene_enet_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 643f5e6467..bce2c19e3f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -15,9 +15,10 @@ #include #include #include -#include +#include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 8775c3234e..766ab78256 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -739,7 +739,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Macintosh MACE ethernet driver"); MODULE_ALIAS("platform:macmace"); -static int mac_mace_device_remove(struct platform_device *pdev) +static void mac_mace_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct mace_data *mp = netdev_priv(dev); @@ -755,13 +755,11 @@ static int mac_mace_device_remove(struct platform_device *pdev) mp->tx_ring, mp->tx_ring_phys); free_netdev(dev); - - return 0; } static struct platform_driver mac_mace_driver = { .probe = mace_probe, - .remove = mac_mace_device_remove, + .remove_new = mac_mace_device_remove, .driver = { .name = mac_mace_string, }, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 28c9b6f1a5..5acb3e16b5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -953,8 +953,6 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) { struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; unsigned int tx_ring_idx, rx_ring_idx; - struct aq_ring_s *hwts; - struct aq_ring_s *ring; int err; if (!aq_ptp) @@ -962,29 +960,23 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); - ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic, - tx_ring_idx, &aq_nic->aq_nic_cfg); - if (!ring) { - err = -ENOMEM; + err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic, + tx_ring_idx, &aq_nic->aq_nic_cfg); + if (err) goto err_exit; - } rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); - ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic, - rx_ring_idx, &aq_nic->aq_nic_cfg); - if (!ring) { - err = -ENOMEM; + err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic, + rx_ring_idx, &aq_nic->aq_nic_cfg); + if (err) goto err_exit_ptp_tx; - } - hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX, - aq_nic->aq_nic_cfg.rxds, - aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size); - if (!hwts) { - err = -ENOMEM; + err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX, + aq_nic->aq_nic_cfg.rxds, + aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size); + if (err) goto err_exit_ptp_rx; - } err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds); if (err != 0) { @@ -1001,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) return 0; err_exit_hwts_rx: - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); err_exit_ptp_rx: aq_ring_free(&aq_ptp->ptp_rx); err_exit_ptp_tx: @@ -1019,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic) aq_ring_free(&aq_ptp->ptp_tx); 
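/*
 * Illustrative sketch, not from this patch: the API shape the atlantic
 * aq_ring/aq_ptp changes in this area move to.  The caller owns the ring
 * structure; the alloc helper fills it in and returns 0 or a negative errno,
 * instead of handing the same pointer back (or NULL) and forcing callers to
 * translate NULL into -ENOMEM.  Names (bar_ring, bar_*) are hypothetical.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct bar_ring {
	void		*descs;
	dma_addr_t	descs_pa;
	unsigned int	size;
	unsigned int	desc_size;
};

static int bar_ring_alloc(struct bar_ring *ring, struct device *dev,
			  unsigned int size, unsigned int desc_size)
{
	ring->size = size;
	ring->desc_size = desc_size;
	ring->descs = dma_alloc_coherent(dev, size * desc_size,
					 &ring->descs_pa, GFP_KERNEL);
	return ring->descs ? 0 : -ENOMEM;
}

/* callers propagate the error code instead of mapping NULL to -ENOMEM */
static int bar_rings_init(struct bar_ring *tx, struct bar_ring *rx,
			  struct device *dev)
{
	int err;

	err = bar_ring_alloc(tx, dev, 1024, 16);
	if (err)
		return err;

	err = bar_ring_alloc(rx, dev, 1024, 16);
	if (err)
		dma_free_coherent(dev, tx->size * tx->desc_size,
				  tx->descs, tx->descs_pa);
	return err;
}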
aq_ring_free(&aq_ptp->ptp_rx); - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); aq_ptp_skb_ring_release(&aq_ptp->skb_ring); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index e1885c1eb1..f7433abd65 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -132,8 +132,8 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf) return 0; } -static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic) +static int aq_ring_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic) { int err = 0; @@ -156,46 +156,29 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, err_exit: if (err < 0) { aq_ring_free(self); - self = NULL; } - return self; + return err; } -struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic, - unsigned int idx, - struct aq_nic_cfg_s *aq_nic_cfg) +int aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) { - int err = 0; - self->aq_nic = aq_nic; self->idx = idx; self->size = aq_nic_cfg->txds; self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; - self = aq_ring_alloc(self, aq_nic); - if (!self) { - err = -ENOMEM; - goto err_exit; - } - -err_exit: - if (err < 0) { - aq_ring_free(self); - self = NULL; - } - - return self; + return aq_ring_alloc(self, aq_nic); } -struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic, - unsigned int idx, - struct aq_nic_cfg_s *aq_nic_cfg) +int aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg) { - int err = 0; - self->aq_nic = aq_nic; self->idx = idx; self->size = aq_nic_cfg->rxds; @@ -217,22 +200,10 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, self->tail_size = 0; } - self = aq_ring_alloc(self, aq_nic); - if (!self) { - err = -ENOMEM; - goto err_exit; - } - -err_exit: - if (err < 0) { - aq_ring_free(self); - self = NULL; - } - - return self; + return aq_ring_alloc(self, aq_nic); } -struct aq_ring_s * +int aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, unsigned int size, unsigned int dx_size) { @@ -250,10 +221,10 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, GFP_KERNEL); if (!self->dx_ring) { aq_ring_free(self); - return NULL; + return -ENOMEM; } - return self; + return 0; } int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) @@ -948,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self) } } +void aq_ring_hwts_rx_free(struct aq_ring_s *self) +{ + if (!self) + return; + + if (self->dx_ring) { + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size + AQ_CFG_RXDS_DEF, + self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } +} + unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) { unsigned int count; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 0a6c34438c..d627ace850 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -183,14 +183,14 @@ static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self) self->sw_head - self->sw_tail - 1); } -struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic, - 
unsigned int idx, - struct aq_nic_cfg_s *aq_nic_cfg); -struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic, - unsigned int idx, - struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_tx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); +int aq_ring_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, + unsigned int idx, + struct aq_nic_cfg_s *aq_nic_cfg); int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type); void aq_ring_rx_deinit(struct aq_ring_s *self); @@ -207,9 +207,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int budget); int aq_ring_rx_fill(struct aq_ring_s *self); -struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self, - struct aq_nic_s *aq_nic, unsigned int idx, - unsigned int size, unsigned int dx_size); +int aq_ring_hwts_rx_alloc(struct aq_ring_s *self, + struct aq_nic_s *aq_nic, unsigned int idx, + unsigned int size, unsigned int dx_size); +void aq_ring_hwts_rx_free(struct aq_ring_s *self); void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic); unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f5db1c44e9..9769ab4f9b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -136,35 +136,32 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg, i, idx); - ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic, - idx_ring, aq_nic_cfg); - if (!ring) { - err = -ENOMEM; + ring = &self->ring[i][AQ_VEC_TX_ID]; + err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg); + if (err) goto err_exit; - } ++self->tx_rings; aq_nic_set_tx_ring(aq_nic, idx_ring, ring); - if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + ring = &self->ring[i][AQ_VEC_RX_ID]; + if (xdp_rxq_info_reg(&ring->xdp_rxq, aq_nic->ndev, idx, self->napi.napi_id) < 0) { err = -ENOMEM; goto err_exit; } - if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL) < 0) { - xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); + xdp_rxq_info_unreg(&ring->xdp_rxq); err = -ENOMEM; goto err_exit; } - ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, - idx_ring, aq_nic_cfg); - if (!ring) { - xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); - err = -ENOMEM; + err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg); + if (err) { + xdp_rxq_info_unreg(&ring->xdp_rxq); goto err_exit; } diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index ce3147e886..a3afddb23e 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -58,14 +58,12 @@ out_netdev: return err; } -static int emac_arc_remove(struct platform_device *pdev) +static void emac_arc_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); arc_emac_remove(ndev); free_netdev(ndev); - - return 0; } static const struct of_device_id emac_arc_dt_ids[] = { @@ -76,7 +74,7 @@ MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); static struct platform_driver emac_arc_driver = { .probe = emac_arc_probe, - .remove = emac_arc_remove, + .remove_new = emac_arc_remove, .driver = { .name = DRV_NAME, .of_match_table = emac_arc_dt_ids, diff 
--git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 5091011122..493d6356c8 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -244,7 +244,7 @@ out_netdev: return err; } -static int emac_rockchip_remove(struct platform_device *pdev) +static void emac_rockchip_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rockchip_priv_data *priv = netdev_priv(ndev); @@ -260,12 +260,11 @@ static int emac_rockchip_remove(struct platform_device *pdev) clk_disable_unprepare(priv->macclk); free_netdev(ndev); - return 0; } static struct platform_driver emac_rockchip_driver = { .probe = emac_rockchip_probe, - .remove = emac_rockchip_remove, + .remove_new = emac_rockchip_remove, .driver = { .name = DRV_NAME, .of_match_table = emac_rockchip_dt_ids, diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c index 916ae380a0..7d2fe2e5af 100644 --- a/drivers/net/ethernet/asix/ax88796c_ioctl.c +++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c @@ -24,7 +24,7 @@ static void ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { /* Inherit standard device info */ - strncpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); } static u32 ax88796c_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 009e0b3066..0f2f400b5b 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1968,21 +1968,19 @@ err_put_clk: return err; } -static int ag71xx_remove(struct platform_device *pdev) +static void ag71xx_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ag71xx *ag; if (!ndev) - return 0; + return; ag = netdev_priv(ndev); unregister_netdev(ndev); ag71xx_mdio_remove(ag); clk_disable_unprepare(ag->clk_eth); platform_set_drvdata(pdev, NULL); - - return 0; } static const u32 ar71xx_fifo_ar7100[] = { @@ -2069,7 +2067,7 @@ static const struct of_device_id ag71xx_match[] = { static struct platform_driver ag71xx_driver = { .probe = ag71xx_probe, - .remove = ag71xx_remove, + .remove_new = ag71xx_remove, .driver = { .name = "ag71xx", .of_match_table = ag71xx_match, diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 74b78164cf..46cdc32b4e 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -842,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) } static inline void atl1c_clean_buffer(struct pci_dev *pdev, - struct atl1c_buffer *buffer_info) + struct atl1c_buffer *buffer_info, + int budget) { u16 pci_driection; if (buffer_info->flags & ATL1C_BUFFER_FREE) @@ -861,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev, buffer_info->length, pci_driection); } if (buffer_info->skb) - dev_consume_skb_any(buffer_info->skb); + napi_consume_skb(buffer_info->skb, budget); buffer_info->dma = 0; buffer_info->skb = NULL; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); @@ -882,7 +883,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, ring_count = tpd_ring->count; for (index = 0; index < ring_count; index++) { buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, 0); 
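/*
 * Illustrative sketch, not from this patch: the napi_consume_skb() pattern
 * the atl1c changes around here adopt.  TX-completion paths running in NAPI
 * context pass the poll budget so freed skbs can use the per-CPU bulk-free
 * cache; non-NAPI callers (ring cleanup, error/rollback paths) pass 0, which
 * makes napi_consume_skb() fall back to dev_consume_skb_any().  Names
 * (baz_*) are hypothetical.
 */
#include <linux/skbuff.h>

static void baz_free_tx_buffer(struct sk_buff *skb, int budget)
{
	if (skb)
		napi_consume_skb(skb, budget);
}

/* NAPI poll context: pass the real budget */
static void baz_clean_tx(struct sk_buff **ring, unsigned int count,
			 int budget)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		baz_free_tx_buffer(ring[i], budget);
		ring[i] = NULL;
	}
}

/* shutdown/reset context: budget 0 means "not called from NAPI" */
static void baz_clean_tx_ring(struct sk_buff **ring, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		baz_free_tx_buffer(ring[i], 0);
		ring[i] = NULL;
	}
}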
} netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); @@ -909,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue) for (j = 0; j < rfd_ring->count; j++) { buffer_info = &rfd_ring->buffer_info[j]; - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, 0); } /* zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); @@ -1607,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget) total_bytes += buffer_info->skb->len; total_packets++; } - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, budget); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); @@ -2151,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt, while (index != tpd_ring->next_to_use) { tpd = ATL1C_TPD_DESC(tpd_ring, index); buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(adpt->pdev, buffer_info); + atl1c_clean_buffer(adpt->pdev, buffer_info, 0); memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); if (++index == tpd_ring->count) index = 0; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 02aa6fd8eb..a9014d7932 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2446,7 +2446,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) { - if (!napi_schedule_prep(&adapter->napi)) + if (!napi_schedule(&adapter->napi)) /* It is possible in case even the RX/TX ints are disabled via IMR * register the ISR bits are set anyway (but do not produce IRQ). * To handle such situation the napi functions used to check is @@ -2454,8 +2454,6 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) */ return 0; - __napi_schedule(&adapter->napi); - /* * Disable RX/TX ints via IMR register if it is * allowed. 
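/*
 * Illustrative sketch, not from this patch: using the boolean return value
 * of napi_schedule(), as the atl1 hunk above does, instead of the
 * open-coded napi_schedule_prep()/__napi_schedule() pair.  This assumes the
 * napi_schedule() that returns whether polling was actually armed (present
 * in this kernel series); the adapter structure and register are
 * hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct qux_adapter {
	struct napi_struct	napi;
	void __iomem		*imr;	/* interrupt mask register */
};

static irqreturn_t qux_isr(int irq, void *data)
{
	struct qux_adapter *adapter = data;

	/* Only mask further RX/TX interrupts if NAPI was actually armed;
	 * if polling is already scheduled there is nothing more to do.
	 */
	if (napi_schedule(&adapter->napi))
		writel(0, adapter->imr);

	return IRQ_HANDLED;
}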
NAPI handler must reenable them in same diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 1b487c071c..bcfc948812 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1377,7 +1377,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; netdev->min_mtu = 40; netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index 41a6098eb0..80245c65cc 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, int j = 0, i; for (i = 0; i < NUM_NET_FILTERS; i++) { - if (j == *rule_cnt) - return -EMSGSIZE; - if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, priv->net_filters[i - 1].wake_filter) continue; + if (j == *rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = priv->net_filters[i].fs.location; } @@ -1344,17 +1344,15 @@ of_put_exit: return ret; } -static int bcmasp_remove(struct platform_device *pdev) +static void bcmasp_remove(struct platform_device *pdev) { struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev); if (!priv) - return 0; + return; priv->destroy_wol(priv); bcmasp_remove_intfs(priv); - - return 0; } static void bcmasp_shutdown(struct platform_device *pdev) @@ -1428,7 +1426,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops, static struct platform_driver bcmasp_driver = { .probe = bcmasp_probe, - .remove = bcmasp_remove, + .remove_new = bcmasp_remove, .shutdown = bcmasp_shutdown, .driver = { .name = "brcm,asp-v2", diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 53e5428812..9cae5a3090 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -1048,6 +1048,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) netdev_err(dev, "could not attach to PHY\n"); goto err_phy_disable; } + + /* Indicate that the MAC is responsible for PHY PM */ + phydev->mac_managed_pm = true; } else if (!intf->wolopts) { ret = phy_resume(dev->phydev); if (ret) diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 33d86683af..3e7c8671cd 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -768,7 +768,7 @@ err_dma_free: return err; } -static int bcm4908_enet_remove(struct platform_device *pdev) +static void bcm4908_enet_remove(struct platform_device *pdev) { struct bcm4908_enet *enet = platform_get_drvdata(pdev); @@ -776,8 +776,6 @@ static int bcm4908_enet_remove(struct platform_device *pdev) netif_napi_del(&enet->rx_ring.napi); netif_napi_del(&enet->tx_ring.napi); bcm4908_enet_dma_free(enet); - - return 0; } static const struct of_device_id bcm4908_enet_of_match[] = { @@ -791,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = { .of_match_table = bcm4908_enet_of_match, }, .probe = bcm4908_enet_probe, - .remove = 
bcm4908_enet_remove, + .remove_new = bcm4908_enet_remove, }; module_platform_driver(bcm4908_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a741070f1f..3196c4dea0 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1902,7 +1902,7 @@ out: /* * exit func, stops hardware and unregisters netdevice */ -static int bcm_enet_remove(struct platform_device *pdev) +static void bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; @@ -1932,12 +1932,11 @@ static int bcm_enet_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); free_netdev(dev); - return 0; } static struct platform_driver bcm63xx_enet_driver = { .probe = bcm_enet_probe, - .remove = bcm_enet_remove, + .remove_new = bcm_enet_remove, .driver = { .name = "bcm63xx_enet", }, @@ -2531,8 +2530,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev, static void bcm_enetsw_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); - strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); + strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); } static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, @@ -2739,7 +2738,7 @@ out: /* exit func, stops hardware and unregisters netdevice */ -static int bcm_enetsw_remove(struct platform_device *pdev) +static void bcm_enetsw_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; @@ -2752,12 +2751,11 @@ static int bcm_enetsw_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); free_netdev(dev); - return 0; } static struct platform_driver bcm63xx_enetsw_driver = { .probe = bcm_enetsw_probe, - .remove = bcm_enetsw_remove, + .remove_new = bcm_enetsw_remove, .driver = { .name = "bcm63xx_enetsw", }, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index bf1611cce9..c9faa85408 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2430,7 +2430,7 @@ static int bcm_sysport_netdevice_event(struct notifier_block *nb, if (dev->netdev_ops != &bcm_sysport_netdev_ops) return NOTIFY_DONE; - if (!dsa_slave_dev_check(info->upper_dev)) + if (!dsa_user_dev_check(info->upper_dev)) return NOTIFY_DONE; if (info->linking) @@ -2648,7 +2648,7 @@ err_free_netdev: return ret; } -static int bcm_sysport_remove(struct platform_device *pdev) +static void bcm_sysport_remove(struct platform_device *pdev) { struct net_device *dev = dev_get_drvdata(&pdev->dev); struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2663,8 +2663,6 @@ static int bcm_sysport_remove(struct platform_device *pdev) of_phy_deregister_fixed_link(dn); free_netdev(dev); dev_set_drvdata(&pdev->dev, NULL); - - return 0; } static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) @@ -2901,7 +2899,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, - .remove = bcm_sysport_remove, + .remove_new = bcm_sysport_remove, .driver = { .name = "brcm-systemport", .of_match_table = bcm_sysport_of_match, diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 
b4381cd419..0b21fd5bd4 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -246,13 +246,11 @@ static int bgmac_probe(struct platform_device *pdev) return bgmac_enet_probe(bgmac); } -static int bgmac_remove(struct platform_device *pdev) +static void bgmac_remove(struct platform_device *pdev) { struct bgmac *bgmac = platform_get_drvdata(pdev); bgmac_enet_remove(bgmac); - - return 0; } #ifdef CONFIG_PM @@ -296,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = { .pm = BGMAC_PM_OPS }, .probe = bgmac_probe, - .remove = bgmac_remove, + .remove_new = bgmac_remove, }; module_platform_driver(bgmac_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 2bc2b707d6..ba6c239d52 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o +bnxt_en-$(CONFIG_BNXT_HWMON) += bnxt_hwmon.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dac4f9510c..22c8bfb5ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -52,8 +52,6 @@ #include #include #include -#include -#include #include #include #include @@ -71,6 +69,7 @@ #include "bnxt_tc.h" #include "bnxt_devlink.h" #include "bnxt_debugfs.h" +#include "bnxt_hwmon.h" #define BNXT_TX_TIMEOUT (5 * HZ) #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ @@ -2145,7 +2144,68 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) return INVALID_HW_RING_ID; } -static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) +static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) +{ + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) + return link_info->force_pam4_link_speed; + return link_info->force_link_speed; +} + +static void bnxt_set_force_speed(struct bnxt_link_info *link_info) +{ + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + if (link_info->force_pam4_link_speed) { + link_info->req_link_speed = link_info->force_pam4_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + } +} + +static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) +{ + link_info->advertising = link_info->auto_link_speeds; + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; +} + +static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) +{ + if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && + link_info->req_link_speed != link_info->force_link_speed) + return true; + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && + link_info->req_link_speed != link_info->force_pam4_link_speed) + return true; + return false; +} + +static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) +{ + if (link_info->advertising != link_info->auto_link_speeds || + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) + return true; + return false; +} + +#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) + +#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ + 
(((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) + +#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ + ((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) + +#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) + +/* Return true if the workqueue has to be scheduled */ +static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) { u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); @@ -2160,11 +2220,53 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { + u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); + char *threshold_type; + bool notify = false; + char *dir_str; + + switch (type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: + threshold_type = "warning"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: + threshold_type = "critical"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: + threshold_type = "fatal"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: + threshold_type = "shutdown"; + break; + default: + netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); + return false; + } + if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { + dir_str = "above"; + notify = true; + } else { + dir_str = "below"; + } + netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", + dir_str, threshold_type); + netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", + BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), + BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); + if (notify) { + bp->thermal_threshold_type = type; + set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); + return true; + } + return false; + } default: netdev_err(bp->dev, "FW reported unknown error type %u\n", err_type); break; } + return false; } #define BNXT_GET_EVENT_PORT(data) \ @@ -2210,7 +2312,7 @@ static int bnxt_async_event_process(struct bnxt *bp, /* print unsupported speed warning in forced speed mode only */ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && (data1 & 0x20000)) { - u16 fw_speed = link_info->force_link_speed; + u16 fw_speed = bnxt_get_force_speed(link_info); u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); if (speed != SPEED_UNKNOWN) @@ -2365,7 +2467,8 @@ static int bnxt_async_event_process(struct bnxt *bp, goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { - bnxt_event_error_report(bp, data1, data2); + if (bnxt_event_error_report(bp, data1, data2)) + break; goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { @@ -3214,8 +3317,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.dma_dir = bp->rx_dir; pp.max_len = PAGE_SIZE; pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) - pp.flags |= PP_FLAG_PAGE_FRAG; rxr->page_pool = page_pool_create(&pp); if (IS_ERR(rxr->page_pool)) { @@ -5825,7 
+5926,7 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) if (BNXT_PF(bp)) { struct hwrm_func_cfg_input *req; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -6236,7 +6337,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, struct hwrm_func_cfg_input *req; u32 enables = 0; - if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) return NULL; req->fid = cpu_to_le16(0xffff); @@ -8581,7 +8682,7 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) else return -EINVAL; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -8599,7 +8700,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -9643,13 +9744,31 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported) return ((supported | diff) != supported); } +static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) +{ + /* Check if any advertised speeds are no longer supported. The caller + * holds the link_lock mutex, so we can modify link_info settings. + */ + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds)) { + link_info->advertising = link_info->support_auto_speeds; + return true; + } + if (bnxt_support_dropped(link_info->advertising_pam4, + link_info->support_pam4_auto_speeds)) { + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; + return true; + } + return false; +} + int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { struct bnxt_link_info *link_info = &bp->link_info; struct hwrm_port_phy_qcfg_output *resp; struct hwrm_port_phy_qcfg_input *req; u8 link_state = link_info->link_state; - bool support_changed = false; + bool support_changed; int rc; rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); @@ -9763,19 +9882,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) if (!BNXT_PHY_CFG_ABLE(bp)) return 0; - /* Check if any advertised speeds are no longer supported. The caller - * holds the link_lock mutex, so we can modify link_info settings. 
- */ - if (bnxt_support_dropped(link_info->advertising, - link_info->support_auto_speeds)) { - link_info->advertising = link_info->support_auto_speeds; - support_changed = true; - } - if (bnxt_support_dropped(link_info->advertising_pam4, - link_info->support_pam4_auto_speeds)) { - link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; - support_changed = true; - } + support_changed = bnxt_support_speed_dropped(link_info); if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) bnxt_hwrm_set_link_setting(bp, true, false); return 0; @@ -10265,79 +10372,6 @@ static void bnxt_get_wol_settings(struct bnxt *bp) } while (handle && handle != 0xffff); } -#ifdef CONFIG_BNXT_HWMON -static ssize_t bnxt_show_temp(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct hwrm_temp_monitor_query_output *resp; - struct hwrm_temp_monitor_query_input *req; - struct bnxt *bp = dev_get_drvdata(dev); - u32 len = 0; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); - if (rc) - return rc; - resp = hwrm_req_hold(bp, req); - rc = hwrm_req_send(bp, req); - if (!rc) - len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ - hwrm_req_drop(bp, req); - if (rc) - return rc; - return len; -} -static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); - -static struct attribute *bnxt_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - NULL -}; -ATTRIBUTE_GROUPS(bnxt); - -static void bnxt_hwmon_close(struct bnxt *bp) -{ - if (bp->hwmon_dev) { - hwmon_device_unregister(bp->hwmon_dev); - bp->hwmon_dev = NULL; - } -} - -static void bnxt_hwmon_open(struct bnxt *bp) -{ - struct hwrm_temp_monitor_query_input *req; - struct pci_dev *pdev = bp->pdev; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); - if (!rc) - rc = hwrm_req_send_silent(bp, req); - if (rc == -EACCES || rc == -EOPNOTSUPP) { - bnxt_hwmon_close(bp); - return; - } - - if (bp->hwmon_dev) - return; - - bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, - DRV_MODULE_NAME, bp, - bnxt_groups); - if (IS_ERR(bp->hwmon_dev)) { - bp->hwmon_dev = NULL; - dev_warn(&pdev->dev, "Cannot register hwmon device\n"); - } -} -#else -static void bnxt_hwmon_close(struct bnxt *bp) -{ -} - -static void bnxt_hwmon_open(struct bnxt *bp) -{ -} -#endif - static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -10389,19 +10423,14 @@ static int bnxt_update_phy_setting(struct bnxt *bp) if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; - if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && - link_info->req_link_speed != link_info->force_link_speed) - update_link = true; - else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && - link_info->req_link_speed != link_info->force_pam4_link_speed) + if (bnxt_force_speed_updated(link_info)) update_link = true; if (link_info->req_duplex != link_info->duplex_setting) update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; - if (link_info->advertising != link_info->auto_link_speeds || - link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) + if (bnxt_auto_speed_updated(link_info)) update_link = true; } @@ -10669,7 +10698,6 @@ static int bnxt_open(struct net_device *dev) bnxt_reenable_sriov(bp); } } - bnxt_hwmon_open(bp); } return rc; @@ -10755,7 +10783,6 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - bnxt_hwmon_close(bp); 
bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); bnxt_hwrm_if_change(bp, false); @@ -12025,16 +12052,9 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp) } else { link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; } - link_info->advertising = link_info->auto_link_speeds; - link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; + bnxt_set_auto_speed(link_info); } else { - link_info->req_link_speed = link_info->force_link_speed; - link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; - if (link_info->force_pam4_link_speed) { - link_info->req_link_speed = - link_info->force_pam4_link_speed; - link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; - } + bnxt_set_force_speed(link_info); link_info->req_duplex = link_info->duplex_setting; } if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) @@ -12130,6 +12150,9 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) bnxt_fw_echo_reply(bp); + if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) + bnxt_hwmon_notify_event(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. */ @@ -12258,6 +12281,20 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +/* FW that pre-reserves 1 VNIC per function */ +static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) +{ + u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); + + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && + (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) + return true; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && + (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) + return true; + return false; +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -12319,6 +12356,9 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (rc) return -ENODEV; + if (bnxt_fw_pre_resv_vnics(bp)) + bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; + bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); @@ -12326,6 +12366,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (bp->fw_cap & BNXT_FW_CAP_PTP) __bnxt_hwrm_ptp_qcfg(bp); bnxt_dcb_init(bp); + bnxt_hwmon_init(bp); return 0; } @@ -13229,6 +13270,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_hwmon_uninit(bp); bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->ptp_cfg); @@ -13825,6 +13867,7 @@ init_err_dl: init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_hwmon_uninit(bp); bnxt_ethtool_free(bp); bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0116f67593..a7d7b09ea1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1301,6 +1301,7 @@ struct bnxt_link_info { u8 req_signal_mode; #define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ #define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 +#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1) u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; @@ -2019,6 +2020,9 @@ struct bnxt { #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30) #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31) #define BNXT_FW_CAP_PTP BIT_ULL(32) + #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33) + #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34) + #define BNXT_FW_CAP_PRE_RESV_VNICS 
BIT_ULL(35) u32 fw_dbg_cap; @@ -2059,6 +2063,7 @@ struct bnxt { #define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) #define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) +#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff) u16 vxlan_fw_dst_port_id; u16 nge_fw_dst_port_id; @@ -2096,6 +2101,7 @@ struct bnxt { #define BNXT_FW_RESET_NOTIFY_SP_EVENT 18 #define BNXT_FW_EXCEPTION_SP_EVENT 19 #define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 +#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22 #define BNXT_FW_ECHO_REQUEST_SP_EVENT 23 struct delayed_work fw_reset_task; @@ -2191,7 +2197,14 @@ struct bnxt { struct bnxt_tc_info *tc_info; struct list_head tc_indr_block_list; struct dentry *debugfs_pdev; +#ifdef CONFIG_BNXT_HWMON struct device *hwmon_dev; + u8 warn_thresh_temp; + u8 crit_thresh_temp; + u8 fatal_thresh_temp; + u8 shutdown_thresh_temp; +#endif + u32 thermal_threshold_type; enum board_idx board_idx; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 9d39f194b2..89809f1b12 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -62,7 +62,7 @@ static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset) if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) return -EOPNOTSUPP; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -104,20 +104,21 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, struct bnxt *bp = devlink_health_reporter_priv(reporter); struct bnxt_fw_health *h = bp->fw_health; u32 fw_status, fw_resets; - int rc; - if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) - return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); + return 0; + } - if (!h->status_reliable) - return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + if (!h->status_reliable) { + devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + return 0; + } mutex_lock(&h->lock); fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); if (BNXT_FW_IS_BOOTING(fw_status)) { - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) { if (!h->severity) { h->severity = SEVERITY_FATAL; @@ -126,58 +127,35 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, devlink_health_report(h->fw_reporter, "FW error diagnosed", h); } - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error"); - if (rc) - goto unlock; - rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "error"); + devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); } else { - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); } - rc = devlink_fmsg_string_pair_put(fmsg, "Severity", - bnxt_health_severity_str(h->severity)); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Severity", + bnxt_health_severity_str(h->severity)); if (h->severity) { - rc = devlink_fmsg_string_pair_put(fmsg, "Remedy", - bnxt_health_remedy_str(h->remedy)); - if (rc) - goto unlock; - if (h->remedy == 
REMEDY_DEVLINK_RECOVER) { - rc = devlink_fmsg_string_pair_put(fmsg, "Impact", - "traffic+ntuple_cfg"); - if (rc) - goto unlock; - } + devlink_fmsg_string_pair_put(fmsg, "Remedy", + bnxt_health_remedy_str(h->remedy)); + if (h->remedy == REMEDY_DEVLINK_RECOVER) + devlink_fmsg_string_pair_put(fmsg, "Impact", + "traffic+ntuple_cfg"); } -unlock: mutex_unlock(&h->lock); - if (rc || !h->resets_reliable) - return rc; + if (!h->resets_reliable) + return 0; fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); - if (rc) - return rc; - return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); + devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); + devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); + devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); + devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); + devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); + devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); + return 0; } static int bnxt_fw_dump(struct devlink_health_reporter *reporter, @@ -203,19 +181,12 @@ static int bnxt_fw_dump(struct devlink_health_reporter *reporter, rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len); if (!rc) { - rc = devlink_fmsg_pair_nest_start(fmsg, "core"); - if (rc) - goto exit; - rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); - if (rc) - goto exit; - rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); - if (rc) - goto exit; - rc = devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_pair_nest_start(fmsg, "core"); + devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); + devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); + devlink_fmsg_pair_nest_end(fmsg); } -exit: vfree(data); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3c36dd8051..5f67a7f94e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -8,6 +8,7 @@ * the Free Software Foundation. 
*/ +#include #include #include #include @@ -533,6 +534,7 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp) static int bnxt_get_num_stats(struct bnxt *bp) { int num_stats = bnxt_get_num_ring_stats(bp); + int len; num_stats += BNXT_NUM_RING_ERR_STATS; @@ -540,8 +542,12 @@ static int bnxt_get_num_stats(struct bnxt *bp) num_stats += BNXT_NUM_PORT_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - num_stats += bp->fw_rx_stats_ext_size + - bp->fw_tx_stats_ext_size; + len = min_t(int, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + num_stats += len; + len = min_t(int, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + num_stats += len; if (bp->pri2cos_valid) num_stats += BNXT_NUM_STATS_PRI; } @@ -651,12 +657,17 @@ skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; + u32 len; - for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { buf[j] = *(rx_port_stats_ext + bnxt_port_stats_ext_arr[i].offset); } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { buf[j] = *(tx_port_stats_ext + bnxt_tx_port_stats_ext_arr[i].offset); } @@ -755,11 +766,17 @@ skip_tpa_stats: } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_tx_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; @@ -1500,94 +1517,388 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) return speed_mask; } -#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ -{ \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100baseT_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 1000baseT_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 10000baseT_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 25000baseCR_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 40000baseCR4_Full);\ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 50000baseCR2_Full);\ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100000baseCR4_Full);\ - if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - Pause); \ - if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ - ethtool_link_ksettings_add_link_mode( \ - lk_ksettings, name, Asym_Pause);\ - } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - Asym_Pause); \ - } \ +enum bnxt_media_type { + BNXT_MEDIA_UNKNOWN = 0, + 
BNXT_MEDIA_TP, + BNXT_MEDIA_CR, + BNXT_MEDIA_SR, + BNXT_MEDIA_LR_ER_FR, + BNXT_MEDIA_KR, + BNXT_MEDIA_KX, + BNXT_MEDIA_X, + __BNXT_MEDIA_END, +}; + +static const enum bnxt_media_type bnxt_phy_types[] = { + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR, +}; + +static enum bnxt_media_type +bnxt_get_media(struct bnxt_link_info *link_info) +{ + switch (link_info->media_type) { + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP: + return BNXT_MEDIA_TP; + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC: + return BNXT_MEDIA_CR; + default: + if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types)) + return bnxt_phy_types[link_info->phy_type]; + return BNXT_MEDIA_UNKNOWN; + } +} + +enum bnxt_link_speed_indices { + BNXT_LINK_SPEED_UNKNOWN = 0, + BNXT_LINK_SPEED_100MB_IDX, + BNXT_LINK_SPEED_1GB_IDX, + BNXT_LINK_SPEED_10GB_IDX, + BNXT_LINK_SPEED_25GB_IDX, + BNXT_LINK_SPEED_40GB_IDX, + BNXT_LINK_SPEED_50GB_IDX, + BNXT_LINK_SPEED_100GB_IDX, + BNXT_LINK_SPEED_200GB_IDX, + __BNXT_LINK_SPEED_END +}; + +static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed) +{ + switch (speed) { + case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX; + case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX; + case BNXT_LINK_SPEED_10GB: return 
BNXT_LINK_SPEED_10GB_IDX; + case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX; + case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX; + case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX; + case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX; + case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX; + default: return BNXT_LINK_SPEED_UNKNOWN; + } +} + +static const enum ethtool_link_mode_bit_indices +bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = { + [BNXT_LINK_SPEED_100MB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_1GB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + /* historically baseT, but DAC is more correctly baseX */ + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_10GB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_25GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_40GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_50GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_100GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_200GB_IDX] = { + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + }, + }, +}; + +#define BNXT_LINK_MODE_UNKNOWN -1 + +static enum ethtool_link_mode_bit_indices 
+bnxt_get_link_mode(struct bnxt_link_info *link_info) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + enum bnxt_media_type media; + u8 sig_mode; + + if (link_info->phy_link_status != BNXT_LINK_LINK) + return BNXT_LINK_MODE_UNKNOWN; + + media = bnxt_get_media(link_info); + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + speed = bnxt_fw_speed_idx(link_info->link_speed); + sig_mode = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; + } else { + speed = bnxt_fw_speed_idx(link_info->req_link_speed); + sig_mode = link_info->req_signal_mode; + } + if (sig_mode >= BNXT_SIG_MODE_MAX) + return BNXT_LINK_MODE_UNKNOWN; + + /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux + * link mode, but since no such devices exist, the zeroes in the + * map can be conveniently used to represent unknown link modes. + */ + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + return BNXT_LINK_MODE_UNKNOWN; + + switch (link_mode) { + case ETHTOOL_LINK_MODE_100baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; + break; + case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; + break; + default: + break; + } + + return link_mode; +} + +static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.supported); + } + + if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.supported); + + if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + return; + + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); + if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); } -#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100baseT_Full) || \ - ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100baseT_Half)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 1000baseT_Full) || \ - ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 1000baseT_Half)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 10000baseT_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 25000baseCR_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 40000baseCR4_Full)) \ - (fw_speeds) |= 
BNXT_LINK_SPEED_MSK_40GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 50000baseCR2_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100000baseCR4_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \ +static const u16 bnxt_nrz_speed_masks[] = { + [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, + [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, + [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, + [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, + [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, + [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ +}; + +static const u16 bnxt_pam4_speed_masks[] = { + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, + [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, +}; + +static enum bnxt_link_speed_indices +bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk) +{ + const u16 *speeds; + int idx, len; + + switch (sig_mode) { + case BNXT_SIG_MODE_NRZ: + speeds = bnxt_nrz_speed_masks; + len = ARRAY_SIZE(bnxt_nrz_speed_masks); + break; + case BNXT_SIG_MODE_PAM4: + speeds = bnxt_pam4_speed_masks; + len = ARRAY_SIZE(bnxt_pam4_speed_masks); + break; + default: + return BNXT_LINK_SPEED_UNKNOWN; + } + + for (idx = 0; idx < len; idx++) { + if (speeds[idx] == speed_msk) + return idx; + } + + return BNXT_LINK_SPEED_UNKNOWN; } -#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 50000baseCR_Full); \ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100000baseCR2_Full);\ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 200000baseCR4_Full);\ +#define BNXT_FW_SPEED_MSK_BITS 16 + +static void +__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, unsigned long *et_mask) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + u8 bit; + + for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { + speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit); + if (!speed) + continue; + + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + continue; + + linkmode_set_bit(link_mode, et_mask); + } } -#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 50000baseCR_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100000baseCR2_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 200000baseCR4_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \ +static void +bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, unsigned long *et_mask) +{ + if (media) { + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask); + return; + } + + /* list speeds for all media if unknown */ + for (media = 1; media < __BNXT_MEDIA_END; media++) + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask); +} + +static 
void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds, + u16 speed_msk, const unsigned long *et_mask, + enum ethtool_link_mode_bit_indices mode) +{ + bool mode_desired = linkmode_test_bit(mode, et_mask); + + if (!mode) + return; + + /* enabled speeds for installed media should override */ + if (installed_media && mode_desired) { + *speeds |= speed_msk; + *delta |= speed_msk; + return; + } + + /* many to one mapping, only allow one change per fw_speed bit */ + if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) { + *speeds ^= speed_msk; + *delta |= speed_msk; + } +} + +static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info, + const unsigned long *et_mask) +{ + enum bnxt_media_type media = bnxt_get_media(link_info); + u32 delta_pam4 = 0; + u32 delta_nrz = 0; + int i, m; + + for (i = 1; i < __BNXT_LINK_SPEED_END; i++) { + /* accept any legal media from user */ + for (m = 1; m < __BNXT_MEDIA_END; m++) { + bnxt_update_speed(&delta_nrz, m == media, + &link_info->advertising, + bnxt_nrz_speed_masks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]); + bnxt_update_speed(&delta_pam4, m == media, + &link_info->advertising_pam4, + bnxt_pam4_speed_masks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]); + } + } } static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, @@ -1611,36 +1922,6 @@ static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, lk_ksettings->link_modes.advertising); } -static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - u16 fw_speeds = link_info->advertising; - u8 fw_pause = 0; - - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - fw_pause = link_info->auto_pause_setting; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); - fw_speeds = link_info->advertising_pam4; - BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising); - bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); -} - -static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - u16 fw_speeds = link_info->lp_auto_link_speeds; - u8 fw_pause = 0; - - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - fw_pause = link_info->lp_pause; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, - lp_advertising); - fw_speeds = link_info->lp_auto_pam4_link_speeds; - BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising); -} - static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, struct ethtool_link_ksettings *lk_ksettings) { @@ -1662,30 +1943,6 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, lk_ksettings->link_modes.supported); } -static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - struct bnxt *bp = container_of(link_info, struct bnxt, link_info); - u16 fw_speeds = link_info->support_speeds; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); - fw_speeds = link_info->support_pam4_speeds; - BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); - - if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Pause); - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Asym_Pause); - } - - if (link_info->support_auto_speeds || - link_info->support_pam4_auto_speeds) - 
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Autoneg); - bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); -} - u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -1714,60 +1971,95 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) } } +static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings, + struct bnxt_link_info *link_info) +{ + struct ethtool_link_settings *base = &lk_ksettings->base; + + if (link_info->link_state == BNXT_LINK_STATE_UP) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + } else if (!link_info->autoneg) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + } +} + static int bnxt_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *lk_ksettings) { - struct bnxt *bp = netdev_priv(dev); - struct bnxt_link_info *link_info = &bp->link_info; struct ethtool_link_settings *base = &lk_ksettings->base; - u32 ethtool_speed; + enum ethtool_link_mode_bit_indices link_mode; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info; + enum bnxt_media_type media; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + base->duplex = DUPLEX_UNKNOWN; + base->speed = SPEED_UNKNOWN; + link_info = &bp->link_info; + mutex_lock(&bp->link_lock); - bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); + bnxt_get_ethtool_modes(link_info, lk_ksettings); + media = bnxt_get_media(link_info); + bnxt_get_ethtool_speeds(link_info->support_speeds, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.supported); + bnxt_get_ethtool_speeds(link_info->support_pam4_speeds, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.supported); + bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); + link_mode = bnxt_get_link_mode(link_info); + if (link_mode != BNXT_LINK_MODE_UNKNOWN) + ethtool_params_from_link_mode(lk_ksettings, link_mode); + else + bnxt_get_default_speeds(lk_ksettings, link_info); - ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); - ethtool_link_ksettings_add_link_mode(lk_ksettings, - advertising, Autoneg); + bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.advertising); base->autoneg = AUTONEG_ENABLE; - base->duplex = DUPLEX_UNKNOWN; + bnxt_get_ethtool_speeds(link_info->advertising, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.advertising); + bnxt_get_ethtool_speeds(link_info->advertising_pam4, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.advertising); if (link_info->phy_link_status == BNXT_LINK_LINK) { - bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); - if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; - else - base->duplex = DUPLEX_HALF; + bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.lp_advertising); + bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.lp_advertising); } - ethtool_speed = 
bnxt_fw_to_ethtool_speed(link_info->link_speed); } else { base->autoneg = AUTONEG_DISABLE; - ethtool_speed = - bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - base->duplex = DUPLEX_HALF; - if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; } - base->speed = ethtool_speed; base->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { base->port = PORT_TP; - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - TP); - ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, - TP); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.advertising); } else { - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - FIBRE); - ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, - FIBRE); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.advertising); if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) base->port = PORT_DA; - else if (link_info->media_type == - PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) + else base->port = PORT_FIBRE; } base->phy_address = link_info->phy_addr; @@ -1776,13 +2068,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev, return 0; } -static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) +static int +bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; u16 support_pam4_spds = link_info->support_pam4_speeds; u16 support_spds = link_info->support_speeds; u8 sig_mode = BNXT_SIG_MODE_NRZ; + u32 lanes_needed = 1; u16 fw_speed = 0; switch (ethtool_speed) { @@ -1803,37 +2097,46 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; break; case SPEED_20000: - if (support_spds & BNXT_LINK_SPEED_MSK_20GB) + if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; + lanes_needed = 2; + } break; case SPEED_25000: if (support_spds & BNXT_LINK_SPEED_MSK_25GB) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; break; case SPEED_40000: - if (support_spds & BNXT_LINK_SPEED_MSK_40GB) + if (support_spds & BNXT_LINK_SPEED_MSK_40GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + lanes_needed = 4; + } break; case SPEED_50000: - if (support_spds & BNXT_LINK_SPEED_MSK_50GB) { + if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + lanes_needed = 2; } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; sig_mode = BNXT_SIG_MODE_PAM4; } break; case SPEED_100000: - if (support_spds & BNXT_LINK_SPEED_MSK_100GB) { + if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) && + lanes != 2 && lanes != 1) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + lanes_needed = 4; } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 2; } break; case SPEED_200000: if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 4; } break; } @@ -1843,6 +2146,11 @@ static int 
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) return -EINVAL; } + if (lanes && lanes != lanes_needed) { + netdev_err(dev, "unsupported number of lanes for speed\n"); + return -EINVAL; + } + if (link_info->req_link_speed == fw_speed && link_info->req_signal_mode == sig_mode && link_info->autoneg == 0) @@ -1887,7 +2195,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; bool set_pause = false; - u32 speed; + u32 speed, lanes = 0; int rc = 0; if (!BNXT_PHY_CFG_ABLE(bp)) @@ -1895,12 +2203,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev, mutex_lock(&bp->link_lock); if (base->autoneg == AUTONEG_ENABLE) { - link_info->advertising = 0; - link_info->advertising_pam4 = 0; - BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings, - advertising); - BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4, - lk_ksettings, advertising); + bnxt_set_ethtool_speeds(link_info, + lk_ksettings->link_modes.advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!link_info->advertising && !link_info->advertising_pam4) { link_info->advertising = link_info->support_auto_speeds; @@ -1928,7 +2232,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev, goto set_setting_exit; } speed = base->speed; - rc = bnxt_force_link_speed(dev, speed); + lanes = lk_ksettings->lanes; + rc = bnxt_force_link_speed(dev, speed, lanes); if (rc) { if (rc == -EALREADY) rc = 0; @@ -4129,6 +4434,7 @@ void bnxt_ethtool_free(struct bnxt *bp) } const struct ethtool_ops bnxt_ethtool_ops = { + .cap_link_lanes_supported = 1, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 3ae8e8af8a..d5fad5a3cd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2022 Broadcom Inc. + * Copyright (c) 2018-2023 Broadcom Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -191,6 +191,11 @@ struct cmd_nums { #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL #define HWRM_QUEUE_GLOBAL_CFG 0x86UL #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL + #define HWRM_QUEUE_QCAPS 0x8cUL #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL #define HWRM_CFA_L2_FILTER_FREE 0x91UL #define HWRM_CFA_L2_FILTER_CFG 0x92UL @@ -315,6 +320,7 @@ struct cmd_nums { #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL #define HWRM_CFA_TLS_FILTER_FREE 0x129UL + #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -383,6 +389,9 @@ struct cmd_nums { #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL #define HWRM_FUNC_SYNCE_CFG 0x1abUL #define HWRM_FUNC_SYNCE_QCFG 0x1acUL + #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL + #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL + #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -408,10 +417,10 @@ struct cmd_nums { #define HWRM_MFG_SELFTEST_QLIST 0x216UL #define HWRM_MFG_SELFTEST_EXEC 0x217UL #define HWRM_STAT_GENERIC_QSTATS 0x218UL + #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL #define HWRM_TF 0x2bcUL #define HWRM_TF_VERSION_GET 0x2bdUL #define HWRM_TF_SESSION_OPEN 0x2c6UL - #define HWRM_TF_SESSION_ATTACH 0x2c7UL #define HWRM_TF_SESSION_REGISTER 0x2c8UL #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL #define HWRM_TF_SESSION_CLOSE 0x2caUL @@ -426,14 +435,6 @@ struct cmd_nums { #define HWRM_TF_TBL_TYPE_GET 0x2daUL #define HWRM_TF_TBL_TYPE_SET 0x2dbUL #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL - #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL - #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL - #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL - #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL - #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL - #define HWRM_TF_EXT_EM_OP 0x2e7UL - #define HWRM_TF_EXT_EM_CFG 0x2e8UL - #define HWRM_TF_EXT_EM_QCFG 0x2e9UL #define HWRM_TF_EM_INSERT 0x2eaUL #define HWRM_TF_EM_DELETE 0x2ebUL #define HWRM_TF_EM_HASH_INSERT 0x2ecUL @@ -465,6 +466,14 @@ struct cmd_nums { #define HWRM_TFC_IDX_TBL_GET 0x390UL #define HWRM_TFC_IDX_TBL_FREE 0x391UL #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL + #define HWRM_TFC_TCAM_SET 0x393UL + #define HWRM_TFC_TCAM_GET 0x394UL + #define HWRM_TFC_TCAM_ALLOC 0x395UL + #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL + #define HWRM_TFC_TCAM_FREE 0x397UL + #define HWRM_TFC_IF_TBL_SET 0x398UL + #define HWRM_TFC_IF_TBL_GET 0x399UL + #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -494,6 +503,8 @@ struct cmd_nums { #define HWRM_DBG_USEQ_RUN 0xff29UL #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL + #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL + #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL #define HWRM_NVM_DEFRAG 0xffecUL #define HWRM_NVM_REQ_ARBITRATION 0xffedUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL @@ -540,6 +551,7 @@ struct ret_codes { #define HWRM_ERR_CODE_BUSY 0x10UL #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL + #define 
HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -571,8 +583,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 118 -#define HWRM_VERSION_STR "1.10.2.118" +#define HWRM_VERSION_RSVD 171 +#define HWRM_VERSION_STR "1.10.2.171" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -761,51 +773,53 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT __le16 event_id; - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL - #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL - #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL - #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL - #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL - #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL - #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL - #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define 
ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL + #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR __le32 event_data2; u8 opaque_v; #define ASYNC_EVENT_CMPL_V 0x1UL @@ -1011,6 +1025,7 @@ struct hwrm_async_event_cmpl_vf_cfg_change { #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL }; /* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ @@ -1402,6 +1417,45 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold { #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8 }; +/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */ 
+struct hwrm_async_event_cmpl_error_report_thermal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -1502,7 +1556,7 @@ struct hwrm_func_vf_free_output { u8 valid; }; -/* hwrm_func_vf_cfg_input (size:448b/56B) */ +/* hwrm_func_vf_cfg_input (size:576b/72B) */ struct hwrm_func_vf_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1510,20 +1564,22 @@ struct hwrm_func_vf_cfg_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL - #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL - #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL - #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL - #define 
FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL __le16 mtu; __le16 guest_vlan; __le16 async_event_cr; @@ -1547,8 +1603,12 @@ struct hwrm_func_vf_cfg_input { __le16 num_vnics; __le16 num_stat_ctxs; __le16 num_hw_ring_grps; - __le16 num_tx_key_ctxs; - __le16 num_rx_key_ctxs; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le16 num_msix; + u8 unused[2]; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; }; /* hwrm_func_vf_cfg_output (size:128b/16B) */ @@ -1572,7 +1632,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_qcaps_output (size:768b/96B) */ +/* hwrm_func_qcaps_output (size:896b/112B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1686,6 +1746,11 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL __le16 tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1695,7 +1760,15 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL + u8 key_xid_partition_cap; + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL u8 unused_1; + u8 device_serial_number[8]; + __le16 ctxs_per_partition; + u8 unused_2[5]; u8 valid; }; @@ -1710,7 +1783,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:896b/112B) */ +/* hwrm_func_qcfg_output (size:1024b/128B) */ struct 
hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -1870,19 +1943,24 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __le16 host_mtu; - __le16 alloc_tx_key_ctxs; - __le16 alloc_rx_key_ctxs; + u8 unused_3[2]; + u8 unused_4[2]; u8 port_kdnet_mode; #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED u8 kdnet_pcie_function; __le16 port_kdnet_fid; - u8 unused_3; + u8 unused_5[2]; + __le32 alloc_tx_key_ctxs; + __le32 alloc_rx_key_ctxs; + u8 lag_id; + u8 parif; + u8 unused_6[5]; u8 valid; }; -/* hwrm_func_cfg_input (size:960b/120B) */ +/* hwrm_func_cfg_input (size:1088b/136B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -2061,8 +2139,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __be16 tpid; __le16 host_mtu; - __le16 num_tx_key_ctxs; - __le16 num_rx_key_ctxs; + u8 unused_0[4]; __le32 enables2; #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL @@ -2083,7 +2160,12 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB - u8 unused_0[6]; + u8 unused_1[2]; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 unused_2; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -2390,7 +2472,11 @@ struct hwrm_func_drv_qver_input { __le64 resp_addr; __le32 reserved; __le16 fid; - u8 unused_0[2]; + u8 driver_type; + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE + u8 unused_0; }; /* hwrm_func_drv_qver_output (size:256b/32B) */ @@ -2435,7 +2521,7 @@ struct hwrm_func_resource_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_resource_qcaps_output (size:512b/64B) */ +/* hwrm_func_resource_qcaps_output (size:704b/88B) */ struct hwrm_func_resource_qcaps_output { __le16 error_code; __le16 req_type; @@ -2467,15 +2553,20 @@ struct hwrm_func_resource_qcaps_output { __le16 max_tx_scheduler_inputs; __le16 flags; #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL - __le16 min_tx_key_ctxs; - __le16 max_tx_key_ctxs; - __le16 min_rx_key_ctxs; - __le16 max_rx_key_ctxs; - u8 unused_0[5]; + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; + u8 unused_0[3]; u8 valid; }; -/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */ +/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */ struct hwrm_func_vf_resource_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -2502,14 +2593,18 @@ struct hwrm_func_vf_resource_cfg_input { __le16 max_hw_ring_grps; __le16 flags; #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL - __le16 min_tx_key_ctxs; - __le16 max_tx_key_ctxs; - __le16 min_rx_key_ctxs; - __le16 max_rx_key_ctxs; - u8 unused_0[2]; -}; - -/* hwrm_func_vf_resource_cfg_output 
(size:256b/32B) */ + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */ struct hwrm_func_vf_resource_cfg_output { __le16 error_code; __le16 req_type; @@ -2523,9 +2618,9 @@ struct hwrm_func_vf_resource_cfg_output { __le16 reserved_vnics; __le16 reserved_stat_ctx; __le16 reserved_hw_ring_grps; - __le16 reserved_tx_key_ctxs; - __le16 reserved_rx_key_ctxs; - u8 unused_0[3]; + __le32 reserved_tx_key_ctxs; + __le32 reserved_rx_key_ctxs; + u8 unused_0[7]; u8 valid; }; @@ -2592,7 +2687,8 @@ struct hwrm_func_backing_store_qcaps_output { __le16 rkc_entry_size; __le32 tkc_max_entries; __le32 rkc_max_entries; - u8 rsvd1[7]; + __le16 fast_qpmd_qp_num_entries; + u8 rsvd1[5]; u8 valid; }; @@ -2630,27 +2726,28 @@ struct hwrm_func_backing_store_cfg_input { #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL __le32 enables; - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL + #define 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL u8 qpc_pg_size_qpc_lvl; #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 @@ -3047,7 +3144,7 @@ struct hwrm_func_backing_store_cfg_input { #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4) #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4) #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G - u8 rsvd[2]; + __le16 qp_num_fast_qpmd_entries; }; /* hwrm_func_backing_store_cfg_output (size:128b/16B) */ @@ -3477,6 +3574,8 @@ struct hwrm_func_backing_store_cfg_v2_input { #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3546,6 +3645,8 @@ struct hwrm_func_backing_store_qcfg_v2_input { #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3559,22 +3660,24 @@ struct hwrm_func_backing_store_qcfg_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; __le32 flags; __le64 page_dir; @@ -3609,7 +3712,8 @@ struct hwrm_func_backing_store_qcfg_v2_output { struct qpc_split_entries { __le32 qp_num_l2_entries; __le32 qp_num_qp1_entries; - __le32 rsvd[2]; + __le32 qp_num_fast_qpmd_entries; + __le32 rsvd; }; /* srq_split_entries (size:128b/16B) */ @@ -3666,6 +3770,8 @@ struct hwrm_func_backing_store_qcaps_v2_input { #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; @@ -3696,13 +3802,16 @@ struct hwrm_func_backing_store_qcaps_v2_output { #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL __le32 instance_bit_map; u8 ctx_init_value; u8 ctx_init_offset; @@ -3712,7 +3821,13 @@ struct hwrm_func_backing_store_qcaps_v2_output { __le32 min_num_entries; __le16 next_valid_type; u8 subtype_valid_cnt; - u8 rsvd2; + u8 exact_cnt_bit_map; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4 __le32 split_entry_0; __le32 split_entry_1; __le32 split_entry_2; @@ -4599,7 +4714,7 @@ struct tx_port_stats_ext { __le64 pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:3776b/472B) */ +/* rx_port_stats_ext (size:3904b/488B) */ 
struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -4660,6 +4775,8 @@ struct rx_port_stats_ext { __le64 rx_discard_packets_cos7; __le64 rx_fec_corrected_blocks; __le64 rx_fec_uncorrectable_blocks; + __le64 rx_filter_miss; + __le64 rx_fec_symbol_err; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -6092,6 +6209,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -6181,12 +6299,16 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL + #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL + #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; }; -/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */ struct hwrm_vnic_tpa_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -6208,6 +6330,7 @@ struct hwrm_vnic_tpa_cfg_input { #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL __le16 vnic_id; __le16 max_agg_segs; #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL @@ -6227,6 +6350,25 @@ struct hwrm_vnic_tpa_cfg_input { u8 unused_0[2]; __le32 max_agg_timer; __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_1[4]; }; /* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ @@ -6282,7 +6424,25 @@ struct hwrm_vnic_tpa_qcfg_output { #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX __le32 max_agg_timer; __le32 min_agg_len; - u8 unused_0[7]; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL + 
#define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_0[3]; u8 valid; }; @@ -6317,8 +6477,9 @@ struct hwrm_vnic_rss_cfg_input { __le64 hash_key_tbl_addr; __le16 rss_ctx_idx; u8 flags; - #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL - #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL u8 ring_select_mode; #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL @@ -6480,14 +6641,15 @@ struct hwrm_ring_alloc_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL - #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL - #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL - #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL - #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL - #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL - #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL + #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL @@ -6541,7 +6703,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 - __le16 unused_3; + __le16 steering_tag; __le32 reserved3; __le32 stat_ctx_id; __le32 reserved4; @@ -6917,6 +7079,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_4; @@ -7099,6 +7262,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define 
CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 tunnel_flags; @@ -7233,7 +7397,8 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE u8 unused_0[3]; __le32 encap_data[20]; }; @@ -7338,6 +7503,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 pri_hint; @@ -7485,6 +7651,7 @@ struct hwrm_cfa_decap_filter_alloc_input { #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_0; @@ -7628,6 +7795,7 @@ struct hwrm_cfa_flow_alloc_input { #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL }; @@ -8053,8 +8221,11 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0[7]; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; + u8 unused_0[6]; }; /* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ @@ -8094,10 +8265,12 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; __be16 
tunnel_dst_port_val; - u8 unused_1[4]; + u8 unused_0[4]; }; /* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ @@ -8141,10 +8314,12 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; __le16 tunnel_dst_port_id; - u8 unused_1[4]; + u8 unused_0[4]; }; /* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ @@ -8212,7 +8387,7 @@ struct ctx_hw_stats_ext { __le64 rx_tpa_events; }; -/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +/* hwrm_stat_ctx_alloc_input (size:320b/40B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -8225,6 +8400,10 @@ struct hwrm_stat_ctx_alloc_input { #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL u8 unused_0; __le16 stats_dma_length; + __le16 flags; + #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL + __le16 steering_tag; + __le32 unused_1; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -8432,7 +8611,7 @@ struct hwrm_stat_generic_qstats_output { u8 valid; }; -/* generic_sw_hw_stats (size:1216b/152B) */ +/* generic_sw_hw_stats (size:1408b/176B) */ struct generic_sw_hw_stats { __le64 pcie_statistics_tx_tlp; __le64 pcie_statistics_rx_tlp; @@ -8453,6 +8632,9 @@ struct generic_sw_hw_stats { __le64 cache_miss_count_cfcs; __le64 cache_miss_count_cfcc; __le64 cache_miss_count_cfcm; + __le64 hw_db_recov_dbs_dropped; + __le64 hw_db_recov_drops_serviced; + __le64 hw_db_recov_dbs_recovered; }; /* hwrm_fw_reset_input (size:192b/24B) */ @@ -8876,7 +9058,7 @@ struct hwrm_temp_monitor_query_input { __le64 resp_addr; }; -/* hwrm_temp_monitor_query_output (size:128b/16B) */ +/* hwrm_temp_monitor_query_output (size:192b/24B) */ struct hwrm_temp_monitor_query_output { __le16 error_code; __le16 req_type; @@ -8886,14 +9068,20 @@ struct hwrm_temp_monitor_query_output { u8 phy_temp; u8 om_temp; u8 flags; - #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL u8 temp2; u8 phy_temp2; u8 om_temp2; + u8 warn_threshold; + u8 critical_threshold; + u8 fatal_threshold; + u8 shutdown_threshold; + u8 unused_0[4]; u8 valid; }; @@ -9317,7 +9505,8 @@ struct hwrm_dbg_ring_info_get_output { __le32 producer_index; __le32 consumer_index; __le32 cag_vector_ctrl; - u8 unused_0[3]; + __le16 st_tag; + u8 unused_0; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c new file mode 100644 index 
0000000000..669d24ba0e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -0,0 +1,241 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_hwmon.h"
+
+void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+	u32 attr;
+
+	if (!bp->hwmon_dev)
+		return;
+
+	switch (bp->thermal_threshold_type) {
+	case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+		attr = hwmon_temp_max_alarm;
+		break;
+	case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+		attr = hwmon_temp_crit_alarm;
+		break;
+	case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+	case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+		attr = hwmon_temp_emergency_alarm;
+		break;
+	default:
+		return;
+	}
+
+	hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0);
+}
+
+static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp)
+{
+	struct hwrm_temp_monitor_query_output *resp;
+	struct hwrm_temp_monitor_query_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+	if (rc)
+		return rc;
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send_silent(bp, req);
+	if (rc)
+		goto drop_req;
+
+	if (temp) {
+		*temp = resp->temp;
+	} else if (resp->flags &
+		   TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) {
+		bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED;
+		bp->warn_thresh_temp = resp->warn_threshold;
+		bp->crit_thresh_temp = resp->critical_threshold;
+		bp->fatal_thresh_temp = resp->fatal_threshold;
+		bp->shutdown_thresh_temp = resp->shutdown_threshold;
+	}
+drop_req:
+	hwrm_req_drop(bp, req);
+	return rc;
+}
+
+static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type,
+				     u32 attr, int channel)
+{
+	const struct bnxt *bp = _data;
+
+	if (type != hwmon_temp)
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return 0444;
+	case hwmon_temp_max:
+	case hwmon_temp_crit:
+	case hwmon_temp_emergency:
+	case hwmon_temp_max_alarm:
+	case hwmon_temp_crit_alarm:
+	case hwmon_temp_emergency_alarm:
+		if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED))
+			return 0;
+		return 0444;
+	default:
+		return 0;
+	}
+}
+
+static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+			   int channel, long *val)
+{
+	struct bnxt *bp = dev_get_drvdata(dev);
+	u8 temp = 0;
+	int rc;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		rc = bnxt_hwrm_temp_query(bp, &temp);
+		if (!rc)
+			*val = temp * 1000;
+		return rc;
+	case hwmon_temp_max:
+		*val = bp->warn_thresh_temp * 1000;
+		return 0;
+	case hwmon_temp_crit:
+		*val = bp->crit_thresh_temp * 1000;
+		return 0;
+	case hwmon_temp_emergency:
+		*val = bp->fatal_thresh_temp * 1000;
+		return 0;
+	case hwmon_temp_max_alarm:
+		rc = bnxt_hwrm_temp_query(bp, &temp);
+		if (!rc)
+			*val = temp >= bp->warn_thresh_temp;
+		return rc;
+	case hwmon_temp_crit_alarm:
+		rc = bnxt_hwrm_temp_query(bp, &temp);
+		if (!rc)
+			*val = temp >= bp->crit_thresh_temp;
+		return rc;
+	case hwmon_temp_emergency_alarm:
+		rc = bnxt_hwrm_temp_query(bp, &temp);
+		if (!rc)
+			*val = temp >= bp->fatal_thresh_temp;
+		return rc;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct hwmon_channel_info *bnxt_hwmon_info[] = {
+	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+			   HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM |
+			   HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM),
+	NULL
+};
+
+static const struct hwmon_ops bnxt_hwmon_ops = {
+	.is_visible = bnxt_hwmon_is_visible,
+	.read = bnxt_hwmon_read,
+};
+
+static const struct hwmon_chip_info bnxt_hwmon_chip_info = {
+	.ops = &bnxt_hwmon_ops,
+	.info = bnxt_hwmon_info,
+};
+
+static ssize_t temp1_shutdown_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct bnxt *bp = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000);
+}
+
+static ssize_t temp1_shutdown_alarm_show(struct device *dev,
+					 struct device_attribute *attr, char *buf)
+{
+	struct bnxt *bp = dev_get_drvdata(dev);
+	u8 temp;
+	int rc;
+
+	rc = bnxt_hwrm_temp_query(bp, &temp);
+	if (rc)
+		return -EIO;
+
+	return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp);
+}
+
+static DEVICE_ATTR_RO(temp1_shutdown);
+static DEVICE_ATTR_RO(temp1_shutdown_alarm);
+
+static struct attribute *bnxt_temp_extra_attrs[] = {
+	&dev_attr_temp1_shutdown.attr,
+	&dev_attr_temp1_shutdown_alarm.attr,
+	NULL,
+};
+
+static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj,
+					     struct attribute *attr, int index)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct bnxt *bp = dev_get_drvdata(dev);
+
+	/* Shutdown temperature setting in NVM is optional */
+	if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) ||
+	    !bp->shutdown_thresh_temp)
+		return 0;
+
+	return attr->mode;
+}
+
+static const struct attribute_group bnxt_temp_extra_group = {
+	.attrs = bnxt_temp_extra_attrs,
+	.is_visible = bnxt_temp_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(bnxt_temp_extra);
+
+void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+	if (bp->hwmon_dev) {
+		hwmon_device_unregister(bp->hwmon_dev);
+		bp->hwmon_dev = NULL;
+	}
+}
+
+void bnxt_hwmon_init(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+	int rc;
+
+	/* temp1_xxx is only sensor, ensure not registered if it will fail */
+	rc = bnxt_hwrm_temp_query(bp, NULL);
+	if (rc == -EACCES || rc == -EOPNOTSUPP) {
+		bnxt_hwmon_uninit(bp);
+		return;
+	}
+
+	if (bp->hwmon_dev)
+		return;
+
+	bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev,
+							DRV_MODULE_NAME, bp,
+							&bnxt_hwmon_chip_info,
+							bnxt_temp_extra_groups);
+	if (IS_ERR(bp->hwmon_dev)) {
+		bp->hwmon_dev = NULL;
+		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+	}
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
new file mode 100644
index 0000000000..de54a562e0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
@@ -0,0 +1,30 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HWMON_H
+#define BNXT_HWMON_H
+
+#ifdef CONFIG_BNXT_HWMON
+void bnxt_hwmon_notify_event(struct bnxt *bp);
+void bnxt_hwmon_uninit(struct bnxt *bp);
+void bnxt_hwmon_init(struct bnxt *bp);
+#else
+static inline void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_init(struct bnxt *bp)
+{
+}
+#endif
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 132442f16f..1df3d56cc4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -485,6 +485,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
 	    msg_len > bp->hwrm_max_ext_req_len) {
+		netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
+			    req_type);
 		rc = -E2BIG;
 		goto exit;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index c98032e381..15ca51b5d2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -137,4 +137,18 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req);
 int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
 void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
 void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+
+/* Older devices can only support req length of 128.
+ * HWRM_FUNC_CFG requests which don't need fields starting at
+ * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG.
+ */
+static inline int
+bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp,
+				  struct hwrm_func_cfg_input **req)
+{
+	u32 req_len;
+
+	req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len);
+	return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len);
+}
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index dde327f2c5..c722b3b417 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -95,7 +95,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
 	 * the spoof check should also include vlan anti-spoofing
 	 */
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
 		req->fid = cpu_to_le16(vf->fw_fid);
 		req->flags = cpu_to_le32(func_flags);
@@ -146,7 +146,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 		return 0;
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -232,7 +232,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	}
 	vf = &bp->pf.vf[vf_id];
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (rc)
 		return rc;
@@ -274,7 +274,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	if (vlan_tag == vf->vlan)
 		return 0;
-	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
 		req->fid = cpu_to_le16(vf->fw_fid);
 		req->dflt_vlan = cpu_to_le16(vlan_tag);
@@ -314,7 +314,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 	}
 	if (min_tx_rate == vf->min_tx_rate &&
max_tx_rate == vf->max_tx_rate) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (!rc) { req->fid = cpu_to_le16(vf->fw_fid); req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | @@ -491,7 +491,7 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) struct bnxt_vf_info *vf; int rc; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -550,7 +550,6 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; vf_vnics = hw_resc->max_vnics - bp->nr_vnics; - vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); @@ -572,11 +571,20 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; vf_rx_rings /= num_vfs; - vf_vnics /= num_vfs; + if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) && + vf_vnics >= pf->max_vfs) { + /* Take into account that FW has pre-reserved 1 VNIC for + * each pf->max_vfs. + */ + vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs; + } else { + vf_vnics /= num_vfs; + } vf_stat_ctx /= num_vfs; vf_ring_grps /= num_vfs; vf_rss /= num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); req->min_tx_rings = cpu_to_le16(vf_tx_rings); req->min_rx_rings = cpu_to_le16(vf_rx_rings); @@ -645,7 +653,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u32 mtu, i; int rc; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 89c8ddc656..2d7ae71287 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3249,23 +3249,6 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void bcmgenet_poll_controller(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - - /* Invoke the main RX/TX interrupt handler */ - disable_irq(priv->irq0); - bcmgenet_isr0(priv->irq0, priv); - enable_irq(priv->irq0); - - /* And the interrupt handler for RX/TX priority queues */ - disable_irq(priv->irq1); - bcmgenet_isr1(priv->irq1, priv); - enable_irq(priv->irq1); -} -#endif - static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) { u32 reg; @@ -3722,9 +3705,6 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bcmgenet_poll_controller, -#endif .ndo_get_stats = bcmgenet_get_stats, .ndo_change_carrier = bcmgenet_change_carrier, }; @@ -4166,7 +4146,7 @@ err: return err; } -static int bcmgenet_remove(struct platform_device *pdev) +static void bcmgenet_remove(struct platform_device *pdev) { struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); @@ -4174,8 +4154,6 @@ static int bcmgenet_remove(struct platform_device *pdev) unregister_netdev(priv->dev); bcmgenet_mii_exit(priv->dev); free_netdev(priv->dev); - - return 0; } static void bcmgenet_shutdown(struct platform_device *pdev) @@ -4354,7 
+4332,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match); static struct platform_driver bcmgenet_driver = { .probe = bcmgenet_probe, - .remove = bcmgenet_remove, + .remove_new = bcmgenet_remove, .shutdown = bcmgenet_shutdown, .driver = { .name = "bcmgenet", diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 3a6763c5e8..fcf8485f34 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2593,7 +2593,7 @@ out_out: return err; } -static int sbmac_remove(struct platform_device *pldev) +static void sbmac_remove(struct platform_device *pldev) { struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); @@ -2604,13 +2604,11 @@ static int sbmac_remove(struct platform_device *pldev) mdiobus_free(sc->mii_bus); iounmap(sc->sbm_base); free_netdev(dev); - - return 0; } static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove = sbmac_remove, + .remove_new = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f1c8ff5b63..f52830dfb2 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6314,6 +6314,46 @@ err_out: return -EOPNOTSUPP; } +static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, + struct skb_shared_hwtstamps *timestamp) +{ + memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); + timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + + tp->ptp_adjust); +} + +static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock) +{ + *hwclock = tr32(TG3_TX_TSTAMP_LSB); + *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; +} + +static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp) +{ + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + struct skb_shared_hwtstamps timestamp; + u64 hwclock; + + if (tp->ptp_txts_retrycnt > 2) + goto done; + + tg3_read_tx_tstamp(tp, &hwclock); + + if (hwclock != tp->pre_tx_ts) { + tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); + skb_tstamp_tx(tp->tx_tstamp_skb, ×tamp); + goto done; + } + tp->ptp_txts_retrycnt++; + return HZ / 10; +done: + dev_consume_skb_any(tp->tx_tstamp_skb); + tp->tx_tstamp_skb = NULL; + tp->ptp_txts_retrycnt = 0; + tp->pre_tx_ts = 0; + return -1; +} + static const struct ptp_clock_info tg3_ptp_caps = { .owner = THIS_MODULE, .name = "tg3 clock", @@ -6325,19 +6365,12 @@ static const struct ptp_clock_info tg3_ptp_caps = { .pps = 0, .adjfine = tg3_ptp_adjfine, .adjtime = tg3_ptp_adjtime, + .do_aux_work = tg3_ptp_ts_aux_work, .gettimex64 = tg3_ptp_gettimex, .settime64 = tg3_ptp_settime, .enable = tg3_ptp_enable, }; -static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, - struct skb_shared_hwtstamps *timestamp) -{ - memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); - timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + - tp->ptp_adjust); -} - /* tp->lock must be held */ static void tg3_ptp_init(struct tg3 *tp) { @@ -6368,6 +6401,8 @@ static void tg3_ptp_fini(struct tg3 *tp) ptp_clock_unregister(tp->ptp_clock); tp->ptp_clock = NULL; tp->ptp_adjust = 0; + dev_consume_skb_any(tp->tx_tstamp_skb); + tp->tx_tstamp_skb = NULL; } static inline int tg3_irq_sync(struct tg3 *tp) @@ -6546,6 +6581,7 @@ static void tg3_tx(struct tg3_napi *tnapi) while (sw_idx != hw_idx) { struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + bool complete_skb_later = false; struct sk_buff *skb = ri->skb; int i, tx_bug = 0; @@ 
-6556,12 +6592,17 @@ static void tg3_tx(struct tg3_napi *tnapi) if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { struct skb_shared_hwtstamps timestamp; - u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); - hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; + u64 hwclock; - tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); - - skb_tstamp_tx(skb, ×tamp); + tg3_read_tx_tstamp(tp, &hwclock); + if (hwclock != tp->pre_tx_ts) { + tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); + skb_tstamp_tx(skb, ×tamp); + tp->pre_tx_ts = 0; + } else { + tp->tx_tstamp_skb = skb; + complete_skb_later = true; + } } dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), @@ -6599,7 +6640,10 @@ static void tg3_tx(struct tg3_napi *tnapi) pkts_compl++; bytes_compl += skb->len; - dev_consume_skb_any(skb); + if (!complete_skb_later) + dev_consume_skb_any(skb); + else + ptp_schedule_worker(tp->ptp_clock, 0); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -6611,9 +6655,9 @@ static void tg3_tx(struct tg3_napi *tnapi) tnapi->tx_cons = sw_idx; - /* Need to make the tx_cons update visible to tg3_start_xmit() + /* Need to make the tx_cons update visible to __tg3_start_xmit() * before checking for netif_queue_stopped(). Without the - * memory barrier, there is a small possibility that tg3_start_xmit() + * memory barrier, there is a small possibility that __tg3_start_xmit() * will miss it and cause the queue to be stopped forever. */ smp_mb(); @@ -7853,7 +7897,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; } -static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); +static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *); /* Use GSO to workaround all TSO packets that meet HW bug conditions * indicated in tg3_tx_frag_set() @@ -7889,7 +7933,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); - tg3_start_xmit(seg, tp->dev); + __tg3_start_xmit(seg, tp->dev); } tg3_tso_bug_end: @@ -7899,7 +7943,7 @@ tg3_tso_bug_end: } /* hard_start_xmit for all devices */ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 len, entry, base_flags, mss, vlan = 0; @@ -8038,8 +8082,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && tg3_flag(tp, TX_TSTAMP_EN)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - base_flags |= TXD_FLAG_HWTSTAMP; + tg3_full_lock(tp, 0); + if (!tp->pre_tx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + base_flags |= TXD_FLAG_HWTSTAMP; + tg3_read_tx_tstamp(tp, &tp->pre_tx_ts); + } + tg3_full_unlock(tp); } len = skb_headlen(skb); @@ -8143,11 +8192,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_wake_queue(txq); } - if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { - /* Packets are ready, update Tx producer idx on card. 
*/ - tw32_tx_mbox(tnapi->prodmbox, entry); - } - return NETDEV_TX_OK; dma_error: @@ -8160,6 +8204,42 @@ drop_nofree: return NETDEV_TX_OK; } +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_queue *txq; + u16 skb_queue_mapping; + netdev_tx_t ret; + + skb_queue_mapping = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, skb_queue_mapping); + + ret = __tg3_start_xmit(skb, dev); + + /* Notify the hardware that packets are ready by updating the TX ring + * tail pointer. We respect netdev_xmit_more() thus avoiding poking + * the hardware for every packet. To guarantee forward progress the TX + * ring must be drained when it is full as indicated by + * netif_xmit_stopped(). This needs to happen even when the current + * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets + * queued by previous __tg3_start_xmit() calls might get stuck in + * the queue forever. + */ + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { + struct tg3_napi *tnapi; + struct tg3 *tp; + + tp = netdev_priv(dev); + tnapi = &tp->napi[skb_queue_mapping]; + + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + } + + return ret; +} + static void tg3_mac_loopback(struct tg3 *tp, bool enable) { if (enable) { @@ -17044,7 +17124,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) !tg3_flag(tp, PCI_EXPRESS)) goto out; -#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) +#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC) goal = BOUNDARY_MULTI_CACHELINE; #else #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) @@ -17719,7 +17799,7 @@ static int tg3_init_one(struct pci_dev *pdev, * device behind the EPB cannot support DMA addresses > 40-bit. * On 64-bit systems with IOMMU, use 40-bit dma_mask. * On 64-bit systems without IOMMU, use 64-bit dma_mask and - * do DMA address check in tg3_start_xmit(). + * do DMA address check in __tg3_start_xmit(). 
*/ if (tg3_flag(tp, IS_5788)) persist_dma_mask = dma_mask = DMA_BIT_MASK(32); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 8d753f8c5b..5016475e50 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3192,6 +3192,7 @@ struct tg3 { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; s64 ptp_adjust; + u8 ptp_txts_retrycnt; /* begin "tx thread" cacheline section */ void (*write32_tx_mbox) (struct tg3 *, u32, @@ -3372,6 +3373,8 @@ struct tg3 { struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; struct work_struct reset_task; + struct sk_buff *tx_tstamp_skb; + u64 pre_tx_ts; int nvram_lock_cnt; u32 nvram_size; diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b07522ac3e..9c80ab07a7 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2839,7 +2839,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strscpy_pad(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } static void diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 627a93ce38..10b1e53403 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -19,7 +19,6 @@ #include #include -/* Fix for IA64 */ #include #include diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b940dcd3ac..cebae0f418 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5156,7 +5156,7 @@ err_disable_clocks: return err; } -static int macb_remove(struct platform_device *pdev) +static void macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; @@ -5181,8 +5181,6 @@ static int macb_remove(struct platform_device *pdev) phylink_destroy(bp->phylink); free_netdev(dev); } - - return 0; } static int __maybe_unused macb_suspend(struct device *dev) @@ -5398,7 +5396,7 @@ static const struct dev_pm_ops macb_pm_ops = { static struct platform_driver macb_driver = { .probe = macb_probe, - .remove = macb_remove, + .remove_new = macb_remove, .driver = { .name = "macb", .of_match_table = of_match_ptr(macb_dt_ids), diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index f4f87dfa96..5e97f1e4e3 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1820,7 +1820,7 @@ err_alloc: * changes the link status, releases the DMA descriptor rings, * unregisters the MDIO bus and unmaps the allocated memory. 
*/ -static int xgmac_remove(struct platform_device *pdev) +static void xgmac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct xgmac_priv *priv = netdev_priv(ndev); @@ -1840,8 +1840,6 @@ static int xgmac_remove(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -1921,7 +1919,7 @@ static struct platform_driver xgmac_driver = { .pm = &xgmac_pm_ops, }, .probe = xgmac_probe, - .remove = xgmac_remove, + .remove_new = xgmac_remove, }; module_platform_driver(xgmac_driver); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 9d56181a30..d3e07b6ed5 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -442,10 +442,11 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) oct = lio->oct_dev; memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); - strcpy(drvinfo->driver, "liquidio"); - strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, - ETHTOOL_FWVERS_LEN); - strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); + strscpy(drvinfo->driver, "liquidio", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(oct->pci_dev), + sizeof(drvinfo->bus_info)); } static void @@ -458,10 +459,11 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) oct = lio->oct_dev; memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); - strcpy(drvinfo->driver, "liquidio_vf"); - strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, - ETHTOOL_FWVERS_LEN); - strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); + strscpy(drvinfo->driver, "liquidio_vf", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(oct->pci_dev), + sizeof(drvinfo->bus_info)); } static int diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 100daadbea..34f02a8ec2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1689,7 +1689,7 @@ static int load_firmware(struct octeon_device *oct) if (fw_type_is_auto()) { tmp_fw_type = LIO_FW_NAME_TYPE_NIC; - strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); + strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type)); } else { tmp_fw_type = fw_type; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 600de587d7..aa6c0dfb6f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -638,7 +638,8 @@ lio_vf_rep_netdev_event(struct notifier_block *nb, memset(&rep_cfg, 0, sizeof(rep_cfg)); rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME; rep_cfg.ifidx = vf_rep->ifidx; - strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE); + strscpy(rep_cfg.rep_name.name, ndev->name, + sizeof(rep_cfg.rep_name.name)); ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg), NULL, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 364f4f912d..6b6cb73482 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ 
b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1217,10 +1217,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) goto core_drv_init_err; } - strncpy(app_name, + strscpy(app_name, get_oct_app_string( (u32)recv_pkt->rh.r_core_drv_init.app_mode), - sizeof(app_name) - 1); + sizeof(app_name)); oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) { oct->fw_info.max_nic_ports = @@ -1257,9 +1257,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) memcpy(cs, get_rbd( recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs)); - strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); - strncpy(oct->boardinfo.serial_number, cs->board_serial_number, - OCT_SERIAL_LEN); + strscpy(oct->boardinfo.name, cs->boardname, + sizeof(oct->boardinfo.name)); + strscpy(oct->boardinfo.serial_number, cs->board_serial_number, + sizeof(oct->boardinfo.serial_number)); octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3)); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index edde0b8fa4..007d4b0681 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1521,7 +1521,7 @@ err: return result; } -static int octeon_mgmt_remove(struct platform_device *pdev) +static void octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct octeon_mgmt *p = netdev_priv(netdev); @@ -1529,7 +1529,6 @@ static int octeon_mgmt_remove(struct platform_device *pdev) unregister_netdev(netdev); of_node_put(p->phy_np); free_netdev(netdev); - return 0; } static const struct of_device_id octeon_mgmt_match[] = { @@ -1546,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = { .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, - .remove = octeon_mgmt_remove, + .remove_new = octeon_mgmt_remove, }; module_platform_driver(octeon_mgmt_driver); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index ea75f27502..646ca0bc25 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -76,7 +76,7 @@ struct l2t_data { atomic_t nfree; /* number of free entries */ rwlock_t lock; struct rcu_head rcu_head; /* to handle rcu cleanup */ - struct l2t_entry l2tab[]; + struct l2t_entry l2tab[] __counted_by(nentries); }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 2e9a74fe09..6268f96cb4 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2501,14 +2501,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) return work_done; } -/* - * Returns true if the device is already scheduled for polling. 
- */ -static inline int napi_is_scheduled(struct napi_struct *napi) -{ - return test_bit(NAPI_STATE_SCHED, &napi->state); -} - /** * process_pure_responses - process pure responses from a response queue * @adap: the adapter @@ -2674,12 +2666,7 @@ static int rspq_check_napi(struct sge_qset *qs) { struct sge_rspq *q = &qs->rspq; - if (!napi_is_scheduled(&qs->napi) && - is_new_response(&q->desc[q->cidx], q)) { - napi_schedule(&qs->napi); - return 1; - } - return 0; + return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi); } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 290c105806..847c7fc2bb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -29,7 +29,7 @@ struct clip_tbl { atomic_t nfree; struct list_head ce_free_head; void *cl_list; - struct list_head hash_list[]; + struct list_head hash_list[] __counted_by(clipt_size); }; enum { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index f59dd4b2ae..9050568a03 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -331,6 +331,6 @@ struct cxgb4_link { struct cxgb4_tc_u32_table { unsigned int size; /* number of entries in table */ - struct cxgb4_link table[]; /* Jump table */ + struct cxgb4_link table[] __counted_by(size); /* Jump table */ }; #endif /* __CXGB4_TC_U32_PARSE_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index a10a6862a9..1e5f5b1a22 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -59,7 +59,7 @@ struct l2t_data { rwlock_t lock; atomic_t nfree; /* number of free entries */ struct l2t_entry *rover; /* starting point for next allocation */ - struct l2t_entry l2tab[]; /* MUST BE LAST */ + struct l2t_entry l2tab[] __counted_by(l2t_size); /* MUST BE LAST */ }; static inline unsigned int vlan_prio(const struct l2t_entry *e) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 5f8b871d79..6b3c778815 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -82,7 +82,7 @@ struct sched_class { struct sched_table { /* per port scheduling table */ u8 sched_size; - struct sched_class tab[]; + struct sched_class tab[] __counted_by(sched_size); }; static inline bool can_sched(struct net_device *dev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 98dd78551d..b5ff2e1a99 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -4261,7 +4261,7 @@ static void sge_rx_timer_cb(struct timer_list *t) if (fl_starving(adap, fl)) { rxq = container_of(fl, struct sge_eth_rxq, fl); - if (napi_reschedule(&rxq->rspq.napi)) + if (napi_schedule(&rxq->rspq.napi)) fl->starving++; else set_bit(id, s->starving_fl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h index 541249d789..109c1dff56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.h +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h @@ -66,7 +66,7 @@ struct smt_entry { struct smt_data { unsigned int smt_size; rwlock_t lock; - struct smt_entry smtab[]; + struct smt_entry smtab[] __counted_by(smt_size); }; struct smt_data *t4_init_smt(void); diff --git 
a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 2d0cf76fb3..5b1d746e65 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2094,7 +2094,7 @@ static void sge_rx_timer_cb(struct timer_list *t) struct sge_eth_rxq *rxq; rxq = container_of(fl, struct sge_eth_rxq, fl); - if (napi_reschedule(&rxq->rspq.napi)) + if (napi_schedule(&rxq->rspq.napi)) fl->starving++; else set_bit(id, s->starving_fl); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index 3731c93f8f..c7338ac6a5 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -39,7 +39,6 @@ #include #include -#include #include #include #include @@ -49,7 +48,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h index 1d110d2edd..0d42e7d157 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h @@ -4,7 +4,6 @@ #ifndef __CHCR_IPSEC_H__ #define __CHCR_IPSEC_H__ -#include #include "t4_hw.h" #include "cxgb4.h" #include "t4_msg.h" diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index bcdc7fc2f4..6482728794 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { - struct chcr_ktls_ofld_ctx_tx *tx_ctx = - chcr_get_ktls_tx_context(tls_ctx); - struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; + struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx); struct ch_ktls_port_stats_debug *port_stats; struct chcr_ktls_uld_ctx *u_ctx; @@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; atomic64_inc(&port_stats->ktls_tx_connection_close); kvfree(tx_info); - tx_ctx->chcr_info = NULL; + chcr_set_ktls_tx_info(tls_ctx, NULL); /* release module refcount */ module_put(THIS_MODULE); } @@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct chcr_ktls_uld_ctx *u_ctx; struct chcr_ktls_info *tx_info; struct dst_entry *dst; @@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, u8 daaddr[16]; int ret = -1; - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); - pi = netdev_priv(netdev); adap = pi->adapter; port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; @@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, goto out; } - if (tx_ctx->chcr_info) + if (chcr_get_ktls_tx_info(tls_ctx)) goto out; if (u_ctx && u_ctx->detach) @@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, goto free_tid; atomic64_inc(&port_stats->ktls_tx_ctx); - tx_ctx->chcr_info = tx_info; + chcr_set_ktls_tx_info(tls_ctx, tx_info); return 0; @@ -647,7 
+642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, { const struct cpl_act_open_rpl *p = (void *)input; struct chcr_ktls_info *tx_info = NULL; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct chcr_ktls_uld_ctx *u_ctx; unsigned int atid, tid, status; struct tls_context *tls_ctx; @@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); /* Adding tid */ tls_ctx = tls_get_ctx(tx_info->sk); - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); + tx_ctx = tls_offload_ctx_tx(tls_ctx); u_ctx = adap->uld[CXGB4_ULD_KTLS].handle; if (u_ctx) { ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx, @@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) { u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset; struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct ch_ktls_stats_debug *stats; struct tcphdr *th = tcp_hdr(skb); int data_len, qidx, ret = 0, mss; @@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len; tls_ctx = tls_get_ctx(skb->sk); + tx_ctx = tls_offload_ctx_tx(tls_ctx); tls_netdev = rcu_dereference_bh(tls_ctx->netdev); /* Don't quit on NULL: if tls_device_down is running in parallel, * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was @@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(tls_netdev && tls_netdev != dev)) goto out; - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); - tx_info = tx_ctx->chcr_info; + tx_info = chcr_get_ktls_tx_info(tls_ctx); if (unlikely(!tx_info)) goto out; @@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) * we will send the complete record again. */ - spin_lock_irqsave(&tx_ctx->base.lock, flags); + spin_lock_irqsave(&tx_ctx->lock, flags); do { cxgb4_reclaim_completed_tx(adap, &q->q, true); /* fetch the tls record */ - record = tls_get_record(&tx_ctx->base, tcp_seq, + record = tls_get_record(tx_ctx, tcp_seq, &tx_info->record_no); /* By the time packet reached to us, ACK is received, and record * won't be found in that case, handle it gracefully. */ if (unlikely(!record)) { - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); goto out; } @@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) tls_end_offset != record->len); if (ret) { - spin_unlock_irqrestore(&tx_ctx->base.lock, + spin_unlock_irqrestore(&tx_ctx->lock, flags); goto out; } @@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) /* free the refcount taken earlier */ if (tls_end_offset < data_len) dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); goto out; } @@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) /* if any failure, come out from the loop. 
*/ if (ret) { - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); if (th->fin) dev_kfree_skb_any(skb); @@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) } while (data_len > 0); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); atomic64_inc(&port_stats->ktls_tx_encrypted_packets); atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); @@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info) static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx) { struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct chcr_ktls_info *tx_info; unsigned long index; xa_for_each(&u_ctx->tid_list, index, tx_ctx) { - tx_info = tx_ctx->chcr_info; + tx_info = __chcr_get_ktls_tx_info(tx_ctx); clear_conn_resources(tx_info); port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; atomic64_inc(&port_stats->ktls_tx_connection_close); kvfree(tx_info); - tx_ctx->chcr_info = NULL; + memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX); /* release module refcount */ module_put(THIS_MODULE); } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h index 10572dc553..dbbba92bf5 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h @@ -67,8 +67,7 @@ struct chcr_ktls_info { bool pending_close; }; -struct chcr_ktls_ofld_ctx_tx { - struct tls_offload_context_tx base; +struct chcr_ktls_ctx_tx { struct chcr_ktls_info *chcr_info; }; @@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx { bool detach; }; -static inline struct chcr_ktls_ofld_ctx_tx * -chcr_get_ktls_tx_context(struct tls_context *tls_ctx) +static inline struct chcr_ktls_info * +__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx) { - BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); - return container_of(tls_offload_ctx_tx(tls_ctx), - struct chcr_ktls_ofld_ctx_tx, - base); + struct chcr_ktls_ctx_tx *priv_ctx; + + BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX); + priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state; + return priv_ctx->chcr_info; +} + +static inline struct chcr_ktls_info * +chcr_get_ktls_tx_info(struct tls_context *tls_ctx) +{ + struct chcr_ktls_ctx_tx *priv_ctx; + + BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX); + priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + return priv_ctx->chcr_info; +} + +static inline void +chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info) +{ + struct chcr_ktls_ctx_tx *priv_ctx; + + priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + priv_ctx->chcr_info = chcr_info; } static inline int chcr_get_first_rx_qid(struct adapter *adap) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 62f62bff74..7ff82b6778 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -7,7 +7,6 @@ #define __CHTLS_H__ #include -#include #include #include #include diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 
d323c5c235..0a21a10a79 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1879,7 +1879,7 @@ free: return err; } -static int cs89x0_platform_remove(struct platform_device *pdev) +static void cs89x0_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -1889,7 +1889,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev) */ unregister_netdev(dev); free_netdev(dev); - return 0; } static const struct of_device_id __maybe_unused cs89x0_match[] = { @@ -1904,7 +1903,7 @@ static struct platform_driver cs89x0_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(cs89x0_match), }, - .remove = cs89x0_platform_remove, + .remove_new = cs89x0_platform_remove, }; module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 8627ab19d4..1c2a540db1 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -757,7 +757,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) } -static int ep93xx_eth_remove(struct platform_device *pdev) +static void ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; struct ep93xx_priv *ep; @@ -765,7 +765,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev) dev = platform_get_drvdata(pdev); if (dev == NULL) - return 0; + return; ep = netdev_priv(dev); @@ -782,8 +782,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev) } free_netdev(dev); - - return 0; } static int ep93xx_eth_probe(struct platform_device *pdev) @@ -862,7 +860,7 @@ err_out: static struct platform_driver ep93xx_eth_driver = { .probe = ep93xx_eth_probe, - .remove = ep93xx_eth_remove, + .remove_new = ep93xx_eth_remove, .driver = { .name = "ep93xx-eth", }, diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 21a70b1f0a..887876f35f 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -556,19 +556,18 @@ static int set_mac_address(struct net_device *dev, void *addr) MODULE_LICENSE("GPL"); -static int mac89x0_device_remove(struct platform_device *pdev) +static void mac89x0_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); nubus_writew(0, dev->base_addr + ADD_PORT); free_netdev(dev); - return 0; } static struct platform_driver mac89x0_platform_driver = { .probe = mac89x0_device_probe, - .remove = mac89x0_device_remove, + .remove_new = mac89x0_device_remove, .driver = { .name = "mac89x0", }, diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 636949737d..78287cfcbf 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2531,13 +2531,11 @@ unprepare: return ret; } -static int gemini_ethernet_port_remove(struct platform_device *pdev) +static void gemini_ethernet_port_remove(struct platform_device *pdev) { struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - - return 0; } static const struct of_device_id gemini_ethernet_port_of_match[] = { @@ -2554,7 +2552,7 @@ static struct platform_driver gemini_ethernet_port_driver = { .of_match_table = gemini_ethernet_port_of_match, }, .probe = gemini_ethernet_port_probe, - .remove = gemini_ethernet_port_remove, + .remove_new = gemini_ethernet_port_remove, }; static int 
gemini_ethernet_probe(struct platform_device *pdev) @@ -2596,14 +2594,12 @@ static int gemini_ethernet_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } -static int gemini_ethernet_remove(struct platform_device *pdev) +static void gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); geth_cleanup_freeq(geth); geth->initialized = false; - - return 0; } static const struct of_device_id gemini_ethernet_of_match[] = { @@ -2620,7 +2616,7 @@ static struct platform_driver gemini_ethernet_driver = { .of_match_table = gemini_ethernet_of_match, }, .probe = gemini_ethernet_probe, - .remove = gemini_ethernet_remove, + .remove_new = gemini_ethernet_remove, }; static int __init gemini_ethernet_module_init(void) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 05a89ab676..150cc94ae9 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1770,8 +1770,7 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = { .resume = dm9000_drv_resume, }; -static int -dm9000_drv_remove(struct platform_device *pdev) +static void dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct board_info *dm = to_dm9000_board(ndev); @@ -1783,7 +1782,6 @@ dm9000_drv_remove(struct platform_device *pdev) regulator_disable(dm->power_supply); dev_dbg(&pdev->dev, "released and freed device\n"); - return 0; } #ifdef CONFIG_OF @@ -1801,7 +1799,7 @@ static struct platform_driver dm9000_driver = { .of_match_table = of_match_ptr(dm9000_of_matches), }, .probe = dm9000_probe, - .remove = dm9000_drv_remove, + .remove_new = dm9000_drv_remove, }; module_platform_driver(dm9000_driver); diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h index 0ed598dc75..bd786dfbc0 100644 --- a/drivers/net/ethernet/dec/tulip/tulip.h +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -381,7 +381,7 @@ struct mediatable { unsigned has_reset:6; u32 csr15dir; u32 csr15val; /* 21143 NWay setting. 
*/ - struct medialeaf mleaf[]; + struct medialeaf mleaf[] __counted_by(leafcount); }; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 151ca9573b..2a18df3605 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -841,7 +841,7 @@ err_out_free_dev: return err; } -static int dnet_remove(struct platform_device *pdev) +static void dnet_remove(struct platform_device *pdev) { struct net_device *dev; @@ -859,13 +859,11 @@ static int dnet_remove(struct platform_device *pdev) free_irq(dev->irq, dev); free_netdev(dev); } - - return 0; } static struct platform_driver dnet_driver = { .probe = dnet_probe, - .remove = dnet_remove, + .remove_new = dnet_remove, .driver = { .name = "dnet", }, diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h index 55e1caf193..64c97eb66f 100644 --- a/drivers/net/ethernet/engleder/tsnep_hw.h +++ b/drivers/net/ethernet/engleder/tsnep_hw.h @@ -181,6 +181,8 @@ struct tsnep_gcl_operation { #define TSNEP_DESC_SIZE 256 #define TSNEP_DESC_SIZE_DATA_AFTER 2048 #define TSNEP_DESC_OFFSET 128 +#define TSNEP_DESC_SIZE_DATA_AFTER_INLINE (64 - sizeof(struct tsnep_tx_desc) + \ + sizeof_field(struct tsnep_tx_desc, tx)) #define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000 #define TSNEP_DESC_OWNER_COUNTER_SHIFT 30 #define TSNEP_DESC_LENGTH_MASK 0x00003FFF diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 08e113e785..64eadd3207 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -51,12 +51,22 @@ #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) -#define TSNEP_TX_TYPE_SKB BIT(0) -#define TSNEP_TX_TYPE_SKB_FRAG BIT(1) -#define TSNEP_TX_TYPE_XDP_TX BIT(2) -#define TSNEP_TX_TYPE_XDP_NDO BIT(3) -#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) -#define TSNEP_TX_TYPE_XSK BIT(4) +/* mapping type */ +#define TSNEP_TX_TYPE_MAP BIT(0) +#define TSNEP_TX_TYPE_MAP_PAGE BIT(1) +#define TSNEP_TX_TYPE_INLINE BIT(2) +/* buffer type */ +#define TSNEP_TX_TYPE_SKB BIT(8) +#define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP) +#define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE) +#define TSNEP_TX_TYPE_SKB_FRAG BIT(9) +#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE) +#define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE) +#define TSNEP_TX_TYPE_XDP_TX BIT(10) +#define TSNEP_TX_TYPE_XDP_NDO BIT(11) +#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE) +#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) +#define TSNEP_TX_TYPE_XSK BIT(12) #define TSNEP_XDP_TX BIT(0) #define TSNEP_XDP_REDIRECT BIT(1) @@ -416,6 +426,8 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; entry->desc->more_properties = __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); + if (entry->type & TSNEP_TX_TYPE_INLINE) + entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; /* descriptor properties shall be written last, because valid data is * signaled there @@ -433,39 +445,79 @@ static int tsnep_tx_desc_available(struct tsnep_tx *tx) return tx->read - tx->write - 1; } +static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry, + struct device *dmadev, dma_addr_t *dma) +{ + 
unsigned int len; + int mapped; + + len = skb_frag_size(frag); + if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { + *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, *dma)) + return -ENOMEM; + entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; + mapped = 1; + } else { + void *fragdata = skb_frag_address_safe(frag); + + if (likely(fragdata)) { + memcpy(&entry->desc->tx, fragdata, len); + } else { + struct page *page = skb_frag_page(frag); + + fragdata = kmap_local_page(page); + memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), + len); + kunmap_local(fragdata); + } + entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; + mapped = 0; + } + + return mapped; +} + static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) { struct device *dmadev = tx->adapter->dmadev; struct tsnep_tx_entry *entry; unsigned int len; - dma_addr_t dma; int map_len = 0; - int i; + dma_addr_t dma; + int i, mapped; for (i = 0; i < count; i++) { entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; if (!i) { len = skb_headlen(skb); - dma = dma_map_single(dmadev, skb->data, len, - DMA_TO_DEVICE); - - entry->type = TSNEP_TX_TYPE_SKB; + if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { + dma = dma_map_single(dmadev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + entry->type = TSNEP_TX_TYPE_SKB_MAP; + mapped = 1; + } else { + memcpy(&entry->desc->tx, skb->data, len); + entry->type = TSNEP_TX_TYPE_SKB_INLINE; + mapped = 0; + } } else { - len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); - dma = skb_frag_dma_map(dmadev, - &skb_shinfo(skb)->frags[i - 1], - 0, len, DMA_TO_DEVICE); + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - entry->type = TSNEP_TX_TYPE_SKB_FRAG; + len = skb_frag_size(frag); + mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma); + if (mapped < 0) + return mapped; } - if (dma_mapping_error(dmadev, dma)) - return -ENOMEM; entry->len = len; - dma_unmap_addr_set(entry, dma, dma); - - entry->desc->tx = __cpu_to_le64(dma); + if (likely(mapped)) { + dma_unmap_addr_set(entry, dma, dma); + entry->desc->tx = __cpu_to_le64(dma); + } map_len += len; } @@ -484,13 +536,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; if (entry->len) { - if (entry->type & TSNEP_TX_TYPE_SKB) + if (entry->type & TSNEP_TX_TYPE_MAP) dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), DMA_TO_DEVICE); - else if (entry->type & - (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO)) + else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) dma_unmap_page(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), @@ -586,7 +637,7 @@ static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, if (dma_mapping_error(dmadev, dma)) return -ENOMEM; - entry->type = TSNEP_TX_TYPE_XDP_NDO; + entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; } else { page = unlikely(frag) ? 
skb_frag_page(frag) : virt_to_page(xdpf->data); @@ -668,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, struct xdp_buff *xdp, - struct netdev_queue *tx_nq, struct tsnep_tx *tx) + struct netdev_queue *tx_nq, struct tsnep_tx *tx, + bool zc) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); bool xmit; + u32 type; if (unlikely(!xdpf)) return false; + /* no page pool for zero copy */ + if (zc) + type = TSNEP_TX_TYPE_XDP_NDO; + else + type = TSNEP_TX_TYPE_XDP_TX; + __netif_tx_lock(tx_nq, smp_processor_id()); - xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); /* Avoid transmit queue timeout since we share it with the slow path */ if (xmit) @@ -1222,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -1272,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -2600,7 +2659,7 @@ mdio_init_failed: return retval; } -static int tsnep_remove(struct platform_device *pdev) +static void tsnep_remove(struct platform_device *pdev) { struct tsnep_adapter *adapter = platform_get_drvdata(pdev); @@ -2616,8 +2675,6 @@ static int tsnep_remove(struct platform_device *pdev) mdiobus_unregister(adapter->mdiobus); tsnep_disable_irq(adapter, ECM_INT_ALL); - - return 0; } static const struct of_device_id tsnep_of_match[] = { @@ -2632,7 +2689,7 @@ static struct platform_driver tsnep_driver = { .of_match_table = tsnep_of_match, }, .probe = tsnep_probe, - .remove = tsnep_remove, + .remove_new = tsnep_remove, }; module_platform_driver(tsnep_driver); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 95cbad198b..ad41c90190 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1254,7 +1254,7 @@ out: * ethoc_remove - shutdown OpenCores ethernet MAC * @pdev: platform device */ -static int ethoc_remove(struct platform_device *pdev) +static void ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); @@ -1271,8 +1271,6 @@ static int ethoc_remove(struct platform_device *pdev) unregister_netdev(netdev); free_netdev(netdev); } - - return 0; } #ifdef CONFIG_PM @@ -1298,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, - .remove = ethoc_remove, + .remove_new = ethoc_remove, .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index edf000e7ba..4d7184d468 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -198,7 +198,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) */ if (nps_enet_is_tx_pending(priv)) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); - napi_reschedule(napi); + napi_schedule(napi); } } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c 
b/drivers/net/ethernet/faraday/ftgmac100.c index 9135b918dd..fddfd1dd50 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -2012,7 +2012,7 @@ err_alloc_etherdev: return err; } -static int ftgmac100_remove(struct platform_device *pdev) +static void ftgmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftgmac100 *priv; @@ -2040,7 +2040,6 @@ static int ftgmac100_remove(struct platform_device *pdev) netif_napi_del(&priv->napi); free_netdev(netdev); - return 0; } static const struct of_device_id ftgmac100_of_match[] = { @@ -2051,7 +2050,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove = ftgmac100_remove, + .remove_new = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 183069581b..003bc9a45c 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1219,7 +1219,7 @@ err_alloc_etherdev: return err; } -static int ftmac100_remove(struct platform_device *pdev) +static void ftmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftmac100 *priv; @@ -1234,7 +1234,6 @@ static int ftmac100_remove(struct platform_device *pdev) netif_napi_del(&priv->napi); free_netdev(netdev); - return 0; } static const struct of_device_id ftmac100_of_ids[] = { @@ -1244,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = { static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove = ftmac100_remove, + .remove_new = ftmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftmac100_of_ids diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index b92e3aa7cd..cffbf27c46 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1655,7 +1655,7 @@ out: rx_ring->stats.bytes += rx_byte_cnt; if (xdp_redirect_frm_cnt) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_tx_frm_cnt) enetc_update_tx_ring_tail(tx_ring); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 7439739cd8..a9c2ff2243 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -297,7 +297,7 @@ struct enetc_int_vector { char name[ENETC_INT_NAME_MAX]; struct enetc_bdr rx_ring; - struct enetc_bdr tx_ring[]; + struct enetc_bdr tx_ring[] __counted_by(count_tx_rings); } ____cacheline_aligned_in_smp; struct enetc_cls_rule { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 2513b44056..b65da49dd9 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -443,7 +443,7 @@ struct enetc_psfp_gate { u32 num_entries; refcount_t refcount; struct hlist_node node; - struct action_gate_entry entries[]; + struct action_gate_entry entries[] __counted_by(num_entries); }; /* Only enable the green color frame now diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 54da59286d..c107680985 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -52,11 +52,11 @@ #include #include #include +#include #include #include #include 
#include -#include #include #include #include @@ -186,66 +186,23 @@ static struct platform_device_id fec_devtype[] = { /* keep it for coldfire */ .name = DRIVER_NAME, .driver_data = 0, - }, { - .name = "imx25-fec", - .driver_data = (kernel_ulong_t)&fec_imx25_info, - }, { - .name = "imx27-fec", - .driver_data = (kernel_ulong_t)&fec_imx27_info, - }, { - .name = "imx28-fec", - .driver_data = (kernel_ulong_t)&fec_imx28_info, - }, { - .name = "imx6q-fec", - .driver_data = (kernel_ulong_t)&fec_imx6q_info, - }, { - .name = "mvf600-fec", - .driver_data = (kernel_ulong_t)&fec_mvf600_info, - }, { - .name = "imx6sx-fec", - .driver_data = (kernel_ulong_t)&fec_imx6x_info, - }, { - .name = "imx6ul-fec", - .driver_data = (kernel_ulong_t)&fec_imx6ul_info, - }, { - .name = "imx8mq-fec", - .driver_data = (kernel_ulong_t)&fec_imx8mq_info, - }, { - .name = "imx8qm-fec", - .driver_data = (kernel_ulong_t)&fec_imx8qm_info, - }, { - .name = "s32v234-fec", - .driver_data = (kernel_ulong_t)&fec_s32v234_info, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, fec_devtype); -enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ - IMX27_FEC, /* runs on i.mx27/35/51 */ - IMX28_FEC, - IMX6Q_FEC, - MVF600_FEC, - IMX6SX_FEC, - IMX6UL_FEC, - IMX8MQ_FEC, - IMX8QM_FEC, - S32V234_FEC, -}; - static const struct of_device_id fec_dt_ids[] = { - { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, - { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, - { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, - { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, - { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, - { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, - { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, - { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], }, + { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, }, + { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, }, + { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, + { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, + { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, + { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -406,6 +363,70 @@ static void fec_dump(struct net_device *ndev) } while (bdp != txq->bd.base); } +/* + * Coldfire does not support DMA coherent allocations, and has historically used + * a band-aid with a manual flush in fec_enet_rx_queue. 
+ */ +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); +} +#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_coherent(dev, size, handle, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_coherent(dev, size, cpu_addr, handle); +} +#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ + +struct fec_dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; +}; + +static void fec_dmam_release(struct device *dev, void *res) +{ + struct fec_dma_devres *this = res; + + fec_dma_free(dev, this->size, this->vaddr, this->dma_handle); +} + +static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + struct fec_dma_devres *dr; + void *vaddr; + + dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + vaddr = fec_dma_alloc(dev, size, handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + dr->vaddr = vaddr; + dr->dma_handle = *handle; + dr->size = size; + devres_add(dev, dr); + return vaddr; +} + static inline bool is_ipv4_pkt(struct sk_buff *skb) { return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; @@ -1660,7 +1681,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) } #endif -#ifdef CONFIG_M532x +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) + /* + * Hacky flush of all caches instead of using the DMA API for the TSO + * headers. 
+ */ flush_cache_all(); #endif rxq = fep->rx_queue[queue_id]; @@ -1832,7 +1857,7 @@ rx_processing_done: rxq->bd.cur = bdp; if (xdp_result & FEC_ENET_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); return pkt_received; } @@ -2909,12 +2934,10 @@ static void fec_enet_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { - memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "%s", fec_stats[i].name); } for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { - strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]); } page_pool_ethtool_stats_get_strings(data); @@ -3290,10 +3313,9 @@ static void fec_enet_free_queue(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; - dma_free_coherent(&fep->pdev->dev, - txq->bd.ring_size * TSO_HEADER_SIZE, - txq->tso_hdrs, - txq->tso_hdrs_dma); + fec_dma_free(&fep->pdev->dev, + txq->bd.ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_dma); } for (i = 0; i < fep->num_rx_queues; i++) @@ -3323,10 +3345,9 @@ static int fec_enet_alloc_queue(struct net_device *ndev) txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; - txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, + txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, txq->bd.ring_size * TSO_HEADER_SIZE, - &txq->tso_hdrs_dma, - GFP_KERNEL); + &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { ret = -ENOMEM; goto alloc_failed; @@ -4040,8 +4061,8 @@ static int fec_enet_init(struct net_device *ndev) bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. 
*/ - cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { ret = -ENOMEM; goto free_queue_mem; @@ -4289,14 +4310,13 @@ fec_probe(struct platform_device *pdev) phy_interface_t interface; struct net_device *ndev; int i, irq, ret = 0; - const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; int num_tx_qs; int num_rx_qs; char irq_name[8]; int irq_cnt; - struct fec_devinfo *dev_info; + const struct fec_devinfo *dev_info; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -4311,10 +4331,9 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; + dev_info = device_get_match_data(&pdev->dev); + if (!dev_info) + dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; if (dev_info) fep->quirks = dev_info->quirks; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 3b75cc543b..9ba15d3183 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -618,18 +618,17 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } -static void memac_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) +static unsigned long memac_get_caps(struct phylink_config *config, + phy_interface_t interface) { struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; unsigned long caps = config->mac_capabilities; - if (phy_interface_mode_is_rgmii(state->interface) && + if (phy_interface_mode_is_rgmii(interface) && memac->rgmii_no_half_duplex) caps &= ~(MAC_10HD | MAC_100HD); - phylink_validate_mask_caps(supported, state, caps); + return caps; } /** @@ -776,7 +775,7 @@ static void memac_link_down(struct phylink_config *config, unsigned int mode, } static const struct phylink_mac_ops memac_mac_ops = { - .validate = memac_validate, + .mac_get_caps = memac_get_caps, .mac_select_pcs = memac_select_pcs, .mac_prepare = memac_prepare, .mac_config = memac_mac_config, diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index a6dfc8807d..cf392faa61 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -35,10 +35,9 @@ #include #include #include +#include #include #include -#include -#include #include #include @@ -884,9 +883,9 @@ static const struct ethtool_ops fs_ethtool_ops = { /**************************************************************************************/ #ifdef CONFIG_FS_ENET_HAS_FEC -#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#define IS_FEC(ops) ((ops) == &fs_fec_ops) #else -#define IS_FEC(match) 0 +#define IS_FEC(ops) 0 #endif static const struct net_device_ops fs_enet_netdev_ops = { @@ -903,10 +902,9 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; -static const struct of_device_id fs_enet_match[]; static int fs_enet_probe(struct platform_device *ofdev) { - const struct of_device_id *match; + const struct fs_ops *ops; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -916,15 +914,15 @@ static int 
fs_enet_probe(struct platform_device *ofdev) const char *phy_connection_type; int privsize, len, ret = -ENODEV; - match = of_match_device(fs_enet_match, &ofdev->dev); - if (!match) + ops = device_get_match_data(&ofdev->dev); + if (!ops) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(match)) { + if (!IS_FEC(ops)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -986,7 +984,7 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = match->data; + fep->ops = ops; ret = fep->ops->setup_data(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index a1e777a4b7..7bb6972795 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -30,9 +30,10 @@ #include #include #include +#include +#include #include #include -#include #include #include @@ -96,20 +97,15 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static const struct of_device_id fs_enet_mdio_fec_match[]; static int fs_enet_mdio_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device *); int ret = -ENOMEM, clock, speed; - match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); - if (!match) - return -EINVAL; - get_bus_freq = match->data; + get_bus_freq = device_get_match_data(&ofdev->dev); new_bus = mdiobus_alloc(); if (!new_bus) diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index eee675a25b..70dd982a5e 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -19,9 +19,10 @@ #include #include #include +#include #include #include -#include +#include #include #if IS_ENABLED(CONFIG_UCC_GETH) @@ -407,8 +408,6 @@ static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev, static int fsl_pq_mdio_probe(struct platform_device *pdev) { - const struct of_device_id *id = - of_match_device(fsl_pq_mdio_match, &pdev->dev); const struct fsl_pq_mdio_data *data; struct device_node *np = pdev->dev.of_node; struct resource res; @@ -417,15 +416,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) struct mii_bus *new_bus; int err; - if (!id) { + data = device_get_match_data(&pdev->dev); + if (!data) { dev_err(&pdev->dev, "Failed to match device\n"); return -ENODEV; } - data = id->data; - - dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); - new_bus = mdiobus_alloc_size(sizeof(*priv)); if (!new_bus) return -ENOMEM; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 5703240474..2d42e73383 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -284,7 +284,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) if (block->rx) reschedule |= gve_rx_work_pending(block->rx); - if (reschedule && napi_reschedule(napi)) + if (reschedule && napi_schedule(napi)) iowrite32be(GVE_IRQ_MASK, irq_doorbell); } return work_done; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 7365534790..93ff7c8ec9 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ 
b/drivers/net/ethernet/google/gve/gve_rx.c @@ -362,7 +362,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, - u16 packet_buffer_size, u16 len, + unsigned int truesize, u16 len, struct gve_rx_ctx *ctx) { u32 offset = page_info->page_offset + page_info->pad; @@ -395,10 +395,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, if (skb != ctx->skb_head) { ctx->skb_head->len += len; ctx->skb_head->data_len += len; - ctx->skb_head->truesize += packet_buffer_size; + ctx->skb_head->truesize += truesize; } skb_add_rx_frag(skb, num_frags, page_info->page, - offset, len, packet_buffer_size); + offset, len, truesize); return ctx->skb_head; } @@ -492,7 +492,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, memcpy(alloc_page_info.page_address, src, page_info->pad + len); skb = gve_rx_add_frags(napi, &alloc_page_info, - rx->packet_buffer_size, + PAGE_SIZE, len, ctx); u64_stats_update_begin(&rx->statss); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index ecf92a5d56..b91e7a06b9 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -1021,7 +1021,7 @@ init_fail: return ret; } -static int hip04_remove(struct platform_device *pdev) +static void hip04_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hip04_priv *priv = netdev_priv(ndev); @@ -1035,8 +1035,6 @@ static int hip04_remove(struct platform_device *pdev) of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); - - return 0; } static const struct of_device_id hip04_mac_match[] = { @@ -1048,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match); static struct platform_driver hip04_mac_driver = { .probe = hip04_mac_probe, - .remove = hip04_remove, + .remove_new = hip04_remove, .driver = { .name = DRV_NAME, .of_match_table = hip04_mac_match, diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index cb7b0293fe..2406263c9d 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -893,7 +893,7 @@ out_free_netdev: return ret; } -static int hisi_femac_drv_remove(struct platform_device *pdev) +static void hisi_femac_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hisi_femac_priv *priv = netdev_priv(ndev); @@ -904,8 +904,6 @@ static int hisi_femac_drv_remove(struct platform_device *pdev) phy_disconnect(ndev->phydev); clk_disable_unprepare(priv->clk); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -961,7 +959,7 @@ static struct platform_driver hisi_femac_driver = { .of_match_table = hisi_femac_match, }, .probe = hisi_femac_drv_probe, - .remove = hisi_femac_drv_remove, + .remove_new = hisi_femac_drv_remove, #ifdef CONFIG_PM .suspend = hisi_femac_drv_suspend, .resume = hisi_femac_drv_resume, diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 26d22bb04b..1a972b093a 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -7,7 +7,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -1094,7 +1095,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct 
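The hip04 and hisi_femac hunks above (and many that follow) convert the platform remove callback to the void-returning .remove_new hook. A minimal sketch of the converted shape, using invented foo_* names:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        /* placeholder: the real probe registers a netdev and stores it
         * with platform_set_drvdata(pdev, ndev) */
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        unregister_netdev(ndev);
        free_netdev(ndev);
        /* no return value: the core cannot act on remove() errors anyway */
}

static struct platform_driver foo_driver = {
        .probe          = foo_probe,
        .remove_new     = foo_remove,
        .driver         = {
                .name = "foo",
        },
};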
device_node *node = dev->of_node; - const struct of_device_id *of_id = NULL; struct net_device *ndev; struct hix5hd2_priv *priv; struct mii_bus *bus; @@ -1110,12 +1110,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - of_id = of_match_device(hix5hd2_of_match, dev); - if (!of_id) { - ret = -EINVAL; - goto out_free_netdev; - } - priv->hw_cap = (unsigned long)of_id->data; + priv->hw_cap = (unsigned long)device_get_match_data(dev); priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { @@ -1282,7 +1277,7 @@ out_free_netdev: return ret; } -static int hix5hd2_dev_remove(struct platform_device *pdev) +static void hix5hd2_dev_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hix5hd2_priv *priv = netdev_priv(ndev); @@ -1298,8 +1293,6 @@ static int hix5hd2_dev_remove(struct platform_device *pdev) of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); - - return 0; } static const struct of_device_id hix5hd2_of_match[] = { @@ -1319,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = { .of_match_table = hix5hd2_of_match, }, .probe = hix5hd2_dev_probe, - .remove = hix5hd2_dev_remove, + .remove_new = hix5hd2_dev_remove, }; module_platform_driver(hix5hd2_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index fcaf5132b8..1b67da1f6f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3007,7 +3007,7 @@ free_dev: * hns_dsaf_remove - remove dsaf dev * @pdev: dasf platform device */ -static int hns_dsaf_remove(struct platform_device *pdev) +static void hns_dsaf_remove(struct platform_device *pdev) { struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev); @@ -3020,8 +3020,6 @@ static int hns_dsaf_remove(struct platform_device *pdev) hns_dsaf_free(dsaf_dev); hns_dsaf_free_dev(dsaf_dev); - - return 0; } static const struct of_device_id g_dsaf_match[] = { @@ -3033,7 +3031,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match); static struct platform_driver g_dsaf_driver = { .probe = hns_dsaf_probe, - .remove = hns_dsaf_remove, + .remove_new = hns_dsaf_remove, .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 0f0e16f9af..7e00231c1a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -92,7 +92,7 @@ struct ppe_common_cb { u8 comm_index; /*ppe_common index*/ u32 ppe_num; - struct hns_ppe_cb ppe_cb[]; + struct hns_ppe_cb ppe_cb[] __counted_by(ppe_num); }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index a9f8059256..c1e9b69978 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -108,7 +108,7 @@ struct rcb_common_cb { u32 ring_num; u32 desc_num; /* desc num per queue*/ - struct ring_pair_cb ring_pair_cb[]; + struct ring_pair_cb ring_pair_cb[] __counted_by(ring_num); }; int hns_rcb_buf_size2type(u32 buf_size); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 85722afe21..8a713eed44 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2393,7 
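The hns_dsaf header hunks above annotate their trailing flexible arrays with __counted_by(), telling the compiler and fortify checks which field holds the element count. A small sketch of how such a structure is declared and allocated, with hypothetical foo_* names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_entry {
        u32 id;
};

struct foo_table {
        u32 nr_entries;
        struct foo_entry entries[] __counted_by(nr_entries);
};

static struct foo_table *foo_table_alloc(u32 n)
{
        struct foo_table *t;

        t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);
        if (!t)
                return NULL;
        /* the counter must be valid before the array is indexed */
        t->nr_entries = n;
        return t;
}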
+2393,7 @@ out_read_prop_fail: return ret; } -static int hns_nic_dev_remove(struct platform_device *pdev) +static void hns_nic_dev_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hns_nic_priv *priv = netdev_priv(ndev); @@ -2422,7 +2422,6 @@ static int hns_nic_dev_remove(struct platform_device *pdev) of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); - return 0; } static const struct of_device_id hns_enet_of_match[] = { @@ -2440,7 +2439,7 @@ static struct platform_driver hns_nic_dev_driver = { .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), }, .probe = hns_nic_dev_probe, - .remove = hns_nic_dev_remove, + .remove_new = hns_nic_dev_remove, }; module_platform_driver(hns_nic_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index aaf1f42624..d7e175a9cb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -103,6 +103,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_LANE_NUM_B, HNAE3_DEV_SUPPORT_WOL_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B, + HNAE3_DEV_SUPPORT_VF_FAULT_B, }; #define hnae3_ae_dev_fd_supported(ae_dev) \ @@ -177,6 +178,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_tm_flush_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps) +#define hnae3_ae_dev_vf_fault_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -271,6 +275,7 @@ enum hnae3_reset_type { HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, HNAE3_NONE_RESET, + HNAE3_VF_EXP_RESET, HNAE3_MAX_RESET, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index dcecb23daa..d92ad6082d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -157,6 +157,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B}, {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B}, {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B}, + {HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 2b7197ce0a..533c19d25e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -93,6 +93,7 @@ enum hclge_opcode_type { HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, + HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, @@ -348,6 +349,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_GRO_B = 20, HCLGE_COMM_CAP_FD_B = 21, HCLGE_COMM_CAP_FEC_STATS_B = 25, + HCLGE_COMM_CAP_VF_FAULT_B = 26, HCLGE_COMM_CAP_LANE_NUM_B = 27, HCLGE_COMM_CAP_WOL_B = 28, HCLGE_COMM_CAP_TM_FLUSH_B = 31, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 4f385a18d2..c083d1d107 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -414,6 +414,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = { }, { .name = "support tm flush", .cap_bit = 
HNAE3_DEV_SUPPORT_TM_FLUSH_B, + }, { + .name = "support vf fault detect", + .cap_bit = HNAE3_DEV_SUPPORT_VF_FAULT_B, } }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 677cfaa5fe..b618797a7e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) { struct page_pool_params pp_params = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | - PP_FLAG_DMA_SYNC_DEV, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .order = hns3_page_order(ring), .pool_size = ring->desc_num * hns3_buf_size(ring) / (PAGE_SIZE << hns3_page_order(ring)), diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 3f35227ef1..d63e114f93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1301,10 +1301,12 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { .msg = "tqp_int_ecc_error" }, { .type_id = PF_ABNORMAL_INT_ERROR, - .msg = "pf_abnormal_int_error" + .msg = "pf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = MPF_ABNORMAL_INT_ERROR, - .msg = "mpf_abnormal_int_error" + .msg = "mpf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = COMMON_ERROR, .msg = "common_error" @@ -2759,7 +2761,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev) hclge_handle_error_info_log(ae_dev); } -static void +static bool hclge_handle_error_type_reg_log(struct device *dev, struct hclge_mod_err_info *mod_info, struct hclge_type_reg_err_info *type_reg_info) @@ -2770,6 +2772,7 @@ hclge_handle_error_type_reg_log(struct device *dev, u8 mod_id, total_module, type_id, total_type, i, is_ras; u8 index_module = MODULE_NONE; u8 index_type = NONE_ERROR; + bool cause_by_vf = false; mod_id = mod_info->mod_id; type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK; @@ -2788,6 +2791,7 @@ hclge_handle_error_type_reg_log(struct device *dev, for (i = 0; i < total_type; i++) { if (type_id == hclge_hw_type_id_st[i].type_id) { index_type = i; + cause_by_vf = hclge_hw_type_id_st[i].cause_by_vf; break; } } @@ -2805,6 +2809,8 @@ hclge_handle_error_type_reg_log(struct device *dev, dev_err(dev, "reg_value:\n"); for (i = 0; i < type_reg_info->reg_num; i++) dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); + + return cause_by_vf; } static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, @@ -2815,6 +2821,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, struct device *dev = &hdev->pdev->dev; struct hclge_mod_err_info *mod_info; struct hclge_sum_err_info *sum_info; + bool cause_by_vf = false; u8 mod_num, err_num, i; u32 offset = 0; @@ -2843,12 +2850,16 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, type_reg_info = (struct hclge_type_reg_err_info *) &buf[offset++]; - hclge_handle_error_type_reg_log(dev, mod_info, - type_reg_info); + if (hclge_handle_error_type_reg_log(dev, mod_info, + type_reg_info)) + cause_by_vf = true; offset += type_reg_info->reg_num; } } + + if (hnae3_ae_dev_vf_fault_supported(hdev->ae_dev) && cause_by_vf) + set_bit(HNAE3_VF_EXP_RESET, &ae_dev->hw_err_reset_req); } static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num) @@ -2940,3 +2951,98 @@ err_desc: out: return ret; } + +static bool 
hclge_reset_vf_in_bitmap(struct hclge_dev *hdev, + unsigned long *bitmap) +{ + struct hclge_vport *vport; + bool exist_set = false; + int func_id; + int ret; + + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + if (func_id == PF_VPORT_ID) + return false; + + while (func_id != HCLGE_VPORT_NUM) { + vport = hclge_get_vf_vport(hdev, + func_id - HCLGE_VF_VPORT_START_NUM); + if (!vport) { + dev_err(&hdev->pdev->dev, "invalid func id(%d)\n", + func_id); + return false; + } + + dev_info(&hdev->pdev->dev, "do function %d recovery.", func_id); + + ret = hclge_reset_tqp(&vport->nic); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset tqp, ret = %d.", ret); + return false; + } + + ret = hclge_inform_vf_reset(vport, HNAE3_VF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset func %d, ret = %d.", + func_id, ret); + return false; + } + + exist_set = true; + clear_bit(func_id, bitmap); + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + } + + return exist_set; +} + +static void hclge_get_vf_fault_bitmap(struct hclge_desc *desc, + unsigned long *bitmap) +{ +#define HCLGE_FIR_FAULT_BYTES 24 +#define HCLGE_SEC_FAULT_BYTES 8 + + u8 *buff; + + BUILD_BUG_ON(HCLGE_FIR_FAULT_BYTES + HCLGE_SEC_FAULT_BYTES != + BITS_TO_BYTES(HCLGE_VPORT_NUM)); + + memcpy(bitmap, desc[0].data, HCLGE_FIR_FAULT_BYTES); + buff = (u8 *)bitmap + HCLGE_FIR_FAULT_BYTES; + memcpy(buff, desc[1].data, HCLGE_SEC_FAULT_BYTES); +} + +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev) +{ + unsigned long vf_fault_bitmap[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + struct hclge_desc desc[2]; + bool cause_by_vf = false; + int ret; + + if (!test_and_clear_bit(HNAE3_VF_EXP_RESET, + &hdev->ae_dev->hw_err_reset_req) || + !hnae3_ae_dev_vf_fault_supported(hdev->ae_dev)) + return 0; + + hclge_comm_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + + ret = hclge_comm_cmd_send(&hdev->hw.hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vf bitmap, ret = %d.\n", ret); + return ret; + } + hclge_get_vf_fault_bitmap(desc, vf_fault_bitmap); + + cause_by_vf = hclge_reset_vf_in_bitmap(hdev, vf_fault_bitmap); + if (cause_by_vf) + hdev->ae_dev->hw_err_reset_req = 0; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 86be6fb329..68b738affa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -196,6 +196,7 @@ struct hclge_hw_module_id { struct hclge_hw_type_id { enum hclge_err_type_list type_id; const char *msg; + bool cause_by_vf; /* indicate the error may from vf exception */ }; struct hclge_sum_err_info { @@ -228,4 +229,5 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, unsigned long *reset_requests); int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev); int hclge_handle_mac_tnl(struct hclge_dev *hdev); +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a61d9fd732..5ea9e59569 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -882,8 +882,8 @@ static const struct hclge_speed_bit_map speed_bit_map[] = { {HCLGE_MAC_SPEED_10G, 
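hclge_reset_vf_in_bitmap() above walks the fault bitmap with find_first_bit() and clears each function it manages to recover. An equivalent, simplified walk using the generic for_each_set_bit() iterator; foo_* names and the FOO_MAX_FUNCS bound are invented for illustration:

#include <linux/bitmap.h>

#define FOO_MAX_FUNCS 256

static int foo_reset_func(unsigned int func_id)
{
        return 0;       /* placeholder for per-function recovery */
}

static bool foo_reset_faulting_funcs(unsigned long *bitmap)
{
        unsigned int func_id;
        bool handled = false;

        for_each_set_bit(func_id, bitmap, FOO_MAX_FUNCS) {
                if (foo_reset_func(func_id))
                        break;          /* stop on the first failure */
                clear_bit(func_id, bitmap);
                handled = true;
        }
        return handled;
}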
HCLGE_SUPPORT_10G_BIT}, {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, - {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, - {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, }; @@ -940,100 +940,98 @@ static void hclge_update_fec_support(struct hclge_mac *mac) mac->supported); } +static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = { + {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, +}; + static void hclge_convert_setting_sr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - link_mode); - if 
(speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { + if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_lr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit( - ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { + if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_cr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { + if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_kr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_1G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { + if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) + 
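The hclge_convert_setting_*() hunks above replace long if() chains with constant bit-to-link-mode tables and a single loop. A stripped-down sketch of the same table-driven approach, with invented foo_* names and bit values:

#include <linux/bits.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/linkmode.h>

struct foo_link_mode_map {
        u16 support_bit;
        enum ethtool_link_mode_bit_indices link_mode;
};

static const struct foo_link_mode_map foo_sr_modes[] = {
        { BIT(1), ETHTOOL_LINK_MODE_10000baseSR_Full_BIT },
        { BIT(2), ETHTOOL_LINK_MODE_25000baseSR_Full_BIT },
};

static void foo_convert_sr(u16 speed_ability, unsigned long *link_mode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(foo_sr_modes); i++)
                if (speed_ability & foo_sr_modes[i].support_bit)
                        linkmode_set_bit(foo_sr_modes[i].link_mode, link_mode);
}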
linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_fec(struct hclge_mac *mac) @@ -1159,10 +1157,10 @@ static u32 hclge_get_max_speed(u16 speed_ability) if (speed_ability & HCLGE_SUPPORT_200G_BIT) return HCLGE_MAC_SPEED_200G; - if (speed_ability & HCLGE_SUPPORT_100G_BIT) + if (speed_ability & HCLGE_SUPPORT_100G_BITS) return HCLGE_MAC_SPEED_100G; - if (speed_ability & HCLGE_SUPPORT_50G_BIT) + if (speed_ability & HCLGE_SUPPORT_50G_BITS) return HCLGE_MAC_SPEED_50G; if (speed_ability & HCLGE_SUPPORT_40G_BIT) @@ -3428,7 +3426,7 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) { if (!pci_num_vf(hdev->pdev)) { dev_err(&hdev->pdev->dev, @@ -4472,6 +4470,7 @@ static void hclge_handle_err_recovery(struct hclge_dev *hdev) if (hclge_find_error_source(hdev)) { hclge_handle_error_info_log(ae_dev); hclge_handle_mac_tnl(hdev); + hclge_handle_vf_queue_err_ras(hdev); } hclge_handle_err_reset_request(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7bc2049b72..51979cf712 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -185,15 +185,22 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_1G_BIT BIT(0) #define HCLGE_SUPPORT_10G_BIT BIT(1) #define HCLGE_SUPPORT_25G_BIT BIT(2) -#define HCLGE_SUPPORT_50G_BIT BIT(3) -#define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_50G_R2_BIT BIT(3) +#define HCLGE_SUPPORT_100G_R4_BIT BIT(4) /* to be compatible with exsit board */ #define HCLGE_SUPPORT_40G_BIT BIT(5) #define HCLGE_SUPPORT_100M_BIT BIT(6) #define HCLGE_SUPPORT_10M_BIT BIT(7) #define HCLGE_SUPPORT_200G_BIT BIT(8) +#define HCLGE_SUPPORT_50G_R1_BIT BIT(9) +#define HCLGE_SUPPORT_100G_R2_BIT BIT(10) + #define HCLGE_SUPPORT_GE \ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) +#define HCLGE_SUPPORT_50G_BITS \ + (HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT) +#define HCLGE_SUPPORT_100G_BITS \ + (HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -1076,6 +1083,11 @@ struct hclge_mac_speed_map { u32 speed_fw; /* speed defined in firmware */ }; +struct hclge_link_mode_bmap { + u16 support_bit; + enum ethtool_link_mode_bit_indices link_mode; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -1146,4 +1158,6 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); int hclge_push_vf_link_status(struct hclge_vport *vport); int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); int hclge_mac_update_stats(struct hclge_dev *hdev); +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf); +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 04ff9bf121..4b0d07ca25 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -124,7 +124,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return 
status; } -static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) { __le16 msg_data; u8 dest_vfid; diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 409a89d802..ed73707176 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -610,7 +610,7 @@ static int hns_mdio_probe(struct platform_device *pdev) * * Return 0 on success, negative on failure */ -static int hns_mdio_remove(struct platform_device *pdev) +static void hns_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus; @@ -618,7 +618,6 @@ static int hns_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); platform_set_drvdata(pdev, NULL); - return 0; } static const struct of_device_id hns_mdio_match[] = { @@ -636,7 +635,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match); static struct platform_driver hns_mdio_driver = { .probe = hns_mdio_probe, - .remove = hns_mdio_remove, + .remove_new = hns_mdio_remove, .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 1749d26f4b..03e42512a2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -315,136 +315,76 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv) devlink_unregister(devlink); } -static int chip_fault_show(struct devlink_fmsg *fmsg, - struct hinic_fault_event *event) +static void chip_fault_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) { const char * const level_str[FAULT_LEVEL_MAX + 1] = { "fatal", "reset", "flr", "general", "suggestion", "Unknown"}; u8 fault_level; - int err; fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ? 
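The hinic devlink hunks here drop the per-call error handling because the devlink_fmsg_*_pair_put() helpers now return void and latch errors internally. A minimal sketch of a health-reporter dump callback in that style; foo_fault and its fields are a hypothetical context, not the hinic structures:

#include <net/devlink.h>

struct foo_fault {
        u32 err_type;
        u32 err_addr;
};

static int foo_reporter_dump(struct devlink_health_reporter *reporter,
                             struct devlink_fmsg *fmsg, void *priv_ctx,
                             struct netlink_ext_ack *extack)
{
        struct foo_fault *fault = priv_ctx;

        if (!fault)
                return 0;

        /* the fmsg helpers record errors internally and return void */
        devlink_fmsg_u32_pair_put(fmsg, "err_type", fault->err_type);
        devlink_fmsg_u32_pair_put(fmsg, "err_addr", fault->err_addr);
        return 0;
}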
event->event.chip.err_level : FAULT_LEVEL_MAX; - if (fault_level == FAULT_LEVEL_SERIOUS_FLR) { - err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", - (u32)event->event.chip.func_id); - if (err) - return err; - } - - err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", - event->event.chip.err_csr_addr); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", - event->event.chip.err_csr_value); - if (err) - return err; - - return 0; + if (fault_level == FAULT_LEVEL_SERIOUS_FLR) + devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", + (u32)event->event.chip.func_id); + devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); + devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); + devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", + event->event.chip.err_csr_addr); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", + event->event.chip.err_csr_value); } -static int fault_report_show(struct devlink_fmsg *fmsg, - struct hinic_fault_event *event) +static void fault_report_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) { const char * const type_str[FAULT_TYPE_MAX + 1] = { "chip", "ucode", "mem rd timeout", "mem wr timeout", "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"}; u8 fault_type; - int err; fault_type = (event->type < FAULT_TYPE_MAX) ? 
event->type : FAULT_TYPE_MAX; - err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", - event->event.val, sizeof(event->event.val)); - if (err) - return err; + devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); + devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", event->event.val, + sizeof(event->event.val)); switch (event->type) { case FAULT_TYPE_CHIP: - err = chip_fault_show(fmsg, event); - if (err) - return err; + chip_fault_show(fmsg, event); break; case FAULT_TYPE_UCODE: - err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); + devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); + devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); + devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); break; case FAULT_TYPE_MEM_RD_TIMEOUT: case FAULT_TYPE_MEM_WR_TIMEOUT: - err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", - event->event.mem_timeout.err_csr_ctrl); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", - event->event.mem_timeout.err_csr_data); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", - event->event.mem_timeout.ctrl_tab); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "mem_index", - event->event.mem_timeout.mem_index); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", + event->event.mem_timeout.err_csr_ctrl); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", + event->event.mem_timeout.err_csr_data); + devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", + event->event.mem_timeout.ctrl_tab); + devlink_fmsg_u32_pair_put(fmsg, "mem_index", + event->event.mem_timeout.mem_index); break; case FAULT_TYPE_REG_RD_TIMEOUT: case FAULT_TYPE_REG_WR_TIMEOUT: - err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); break; case FAULT_TYPE_PHY_FAULT: - err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); + devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); + devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); + devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); + devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); break; default: break; } - - return 0; } static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, @@ 
-452,75 +392,30 @@ static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { if (priv_ctx) - return fault_report_show(fmsg, priv_ctx); + fault_report_show(fmsg, priv_ctx); return 0; } -static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, - struct hinic_mgmt_watchdog_info *watchdog_info) +static void mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, + struct hinic_mgmt_watchdog_info *winfo) { - int err; - - err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", - watchdog_info->reg, sizeof(watchdog_info->reg)); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", - watchdog_info->data, sizeof(watchdog_info->data)); - if (err) - return err; - - return 0; + devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", winfo->curr_time_h); + devlink_fmsg_u32_pair_put(fmsg, "time_l", winfo->curr_time_l); + devlink_fmsg_u32_pair_put(fmsg, "task_id", winfo->task_id); + devlink_fmsg_u32_pair_put(fmsg, "sp", winfo->sp); + devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", winfo->curr_used); + devlink_fmsg_u32_pair_put(fmsg, "peak_used", winfo->peak_used); + devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", winfo->is_overflow); + devlink_fmsg_u32_pair_put(fmsg, "stack_top", winfo->stack_top); + devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", winfo->stack_bottom); + devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", winfo->pc); + devlink_fmsg_u32_pair_put(fmsg, "lr", winfo->lr); + devlink_fmsg_u32_pair_put(fmsg, "cpsr", winfo->cpsr); + devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", winfo->reg, + sizeof(winfo->reg)); + devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", + winfo->data, sizeof(winfo->data)); } static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, @@ -528,7 +423,7 @@ static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { if (priv_ctx) - return mgmt_watchdog_report_show(fmsg, priv_ctx); + mgmt_watchdog_report_show(fmsg, priv_ctx); return 0; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index ad47ac51a1..9b60966736 100644 --- 
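The hinic_tx hunk above collapses the snprintf(NULL, 0, ...) + devm_kzalloc() + sprintf() sequence into one devm_kasprintf() call, which sizes, allocates and formats the IRQ name in a device-managed buffer. A minimal sketch with a hypothetical foo_setup_irq_name() wrapper:

#include <linux/device.h>
#include <linux/gfp.h>

static int foo_setup_irq_name(struct device *dev, const char *netdev_name,
                              int q_id, const char **out)
{
        const char *name;

        name = devm_kasprintf(dev, GFP_KERNEL, "%s_txq%d", netdev_name, q_id);
        if (!name)
                return -ENOMEM;

        *out = name;    /* freed automatically when the device goes away */
        return 0;
}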
a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -861,7 +861,7 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; - int err, irqname_len; + int err; txq->netdev = netdev; txq->sq = sq; @@ -882,15 +882,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, goto err_alloc_free_sges; } - irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1; - txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + txq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_txq%d", + netdev->name, qp->q_id); if (!txq->irq_name) { err = -ENOMEM; goto err_alloc_irqname; } - sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id); - err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, CI_UPDATE_NO_COALESC); if (err) diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 54bb4d9a0d..813403c262 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -153,7 +153,7 @@ probe_failed_free_mpu: return retval; } -static int sni_82596_driver_remove(struct platform_device *pdev) +static void sni_82596_driver_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct i596_private *lp = netdev_priv(dev); @@ -164,12 +164,11 @@ static int sni_82596_driver_remove(struct platform_device *pdev) iounmap(lp->ca); iounmap(lp->mpu_port); free_netdev (dev); - return 0; } static struct platform_driver sni_82596_driver = { .probe = sni_82596_probe, - .remove = sni_82596_driver_remove, + .remove_new = sni_82596_driver_remove, .driver = { .name = sni_82596_string, }, diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 0a56e97524..1e29e5c9a2 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -90,7 +90,7 @@ static struct ehea_bcmc_reg_array ehea_bcmc_regs; static int ehea_probe_adapter(struct platform_device *dev); -static int ehea_remove(struct platform_device *dev); +static void ehea_remove(struct platform_device *dev); static const struct of_device_id ehea_module_device_table[] = { { @@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = { .of_match_table = ehea_device_table, }, .probe = ehea_probe_adapter, - .remove = ehea_remove, + .remove_new = ehea_remove, }; void ehea_dump(void *adr, int len, char *msg) @@ -900,7 +900,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) if (!cqe && !cqe_skb) return rx; - if (!napi_reschedule(napi)) + if (!napi_schedule(napi)) return rx; cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); @@ -3471,7 +3471,7 @@ out: return ret; } -static int ehea_remove(struct platform_device *dev) +static void ehea_remove(struct platform_device *dev) { struct ehea_adapter *adapter = platform_get_drvdata(dev); int i; @@ -3492,8 +3492,6 @@ static int ehea_remove(struct platform_device *dev) list_del(&adapter->list); ehea_update_firmware_handles(); - - return 0; } static int check_module_parm(void) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 0c314bf974..e6e47b1842 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -3253,7 +3253,7 @@ static int emac_probe(struct platform_device *ofdev) return 
err; } -static int emac_remove(struct platform_device *ofdev) +static void emac_remove(struct platform_device *ofdev) { struct emac_instance *dev = platform_get_drvdata(ofdev); @@ -3290,8 +3290,6 @@ static int emac_remove(struct platform_device *ofdev) irq_dispose_mapping(dev->emac_irq); free_netdev(dev->ndev); - - return 0; } /* XXX Features in here should be replaced by properties... */ @@ -3319,7 +3317,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_match, }, .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, }; static void __init emac_make_bootlist(void) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index c3236b59e7..2439f7e96e 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -442,7 +442,7 @@ static int mal_poll(struct napi_struct *napi, int budget) if (unlikely(mc->ops->peek_rx(mc->dev) || test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { MAL_DBG2(mal, "rotting packet" NL); - if (!napi_reschedule(napi)) + if (!napi_schedule(napi)) goto more_work; spin_lock_irqsave(&mal->lock, flags); @@ -711,7 +711,7 @@ static int mal_probe(struct platform_device *ofdev) return err; } -static int mal_remove(struct platform_device *ofdev) +static void mal_remove(struct platform_device *ofdev) { struct mal_instance *mal = platform_get_drvdata(ofdev); @@ -740,8 +740,6 @@ static int mal_remove(struct platform_device *ofdev) NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, mal->bd_dma); kfree(mal); - - return 0; } static const struct of_device_id mal_platform_match[] = @@ -770,7 +768,7 @@ static struct platform_driver mal_of_driver = { .of_match_table = mal_platform_match, }, .probe = mal_probe, - .remove = mal_remove, + .remove_new = mal_remove, }; int __init mal_init(void) diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index fd437f986e..e1712fdc3c 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -273,7 +273,7 @@ static int rgmii_probe(struct platform_device *ofdev) return rc; } -static int rgmii_remove(struct platform_device *ofdev) +static void rgmii_remove(struct platform_device *ofdev) { struct rgmii_instance *dev = platform_get_drvdata(ofdev); @@ -281,8 +281,6 @@ static int rgmii_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id rgmii_match[] = @@ -302,7 +300,7 @@ static struct platform_driver rgmii_driver = { .of_match_table = rgmii_match, }, .probe = rgmii_probe, - .remove = rgmii_remove, + .remove_new = rgmii_remove, }; int __init rgmii_init(void) diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index aae9a88d95..fa3488258c 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -130,7 +130,7 @@ static int tah_probe(struct platform_device *ofdev) return rc; } -static int tah_remove(struct platform_device *ofdev) +static void tah_remove(struct platform_device *ofdev) { struct tah_instance *dev = platform_get_drvdata(ofdev); @@ -138,8 +138,6 @@ static int tah_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id tah_match[] = @@ -160,7 +158,7 @@ static struct platform_driver tah_driver = { .of_match_table = tah_match, }, .probe = tah_probe, - .remove = tah_remove, + .remove_new = tah_remove, }; int __init tah_init(void) diff --git a/drivers/net/ethernet/ibm/emac/zmii.c 
b/drivers/net/ethernet/ibm/emac/zmii.c index 6337388ee5..26e86cdee2 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -278,7 +278,7 @@ static int zmii_probe(struct platform_device *ofdev) return rc; } -static int zmii_remove(struct platform_device *ofdev) +static void zmii_remove(struct platform_device *ofdev) { struct zmii_instance *dev = platform_get_drvdata(ofdev); @@ -286,8 +286,6 @@ static int zmii_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id zmii_match[] = @@ -308,7 +306,7 @@ static struct platform_driver zmii_driver = { .of_match_table = zmii_match, }, .probe = zmii_probe, - .remove = zmii_remove, + .remove_new = zmii_remove, }; int __init zmii_init(void) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index a8d79ee350..b5aef0b29e 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1432,7 +1432,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) BUG_ON(lpar_rc != H_SUCCESS); if (ibmveth_rxq_pending_buffer(adapter) && - napi_reschedule(napi)) { + napi_schedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cdf5251e56..30c47b8470 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3519,7 +3519,7 @@ restart_poll: if (napi_complete_done(napi, frames_processed)) { enable_scrq_irq(adapter, rx_scrq); if (pending_scrq(adapter, rx_scrq)) { - if (napi_reschedule(napi)) { + if (napi_schedule(napi)) { disable_scrq_irq(adapter, rx_scrq); goto restart_poll; } @@ -5247,7 +5247,8 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq, /* copy firmware version string from vpd into adapter */ if ((substr + 3 + fw_level_len) < (adapter->vpd->buff + adapter->vpd->len)) { - strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); + strscpy(adapter->fw_version, substr + 3, + sizeof(adapter->fw_version)); } else { dev_info(dev, "FW substr extrapolated VPD buff\n"); } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 9bc0a95198..06ddd7147c 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -225,6 +225,7 @@ config I40E depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS + select NET_DEVLINK help This driver supports Intel(R) Ethernet Controller XL710 Family of devices. For more information on how to identify your adapter, go @@ -284,6 +285,7 @@ config ICE select DIMLIB select NET_DEVLINK select PLDMFW + select DPLL help This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go @@ -355,5 +357,17 @@ config IGC To compile this driver as a module, choose M here. The module will be called igc. +config IDPF + tristate "Intel(R) Infrastructure Data Path Function Support" + depends on PCI_MSI + select DIMLIB + select PAGE_POOL + select PAGE_POOL_STATS + help + This driver supports Intel(R) Infrastructure Data Path Function + devices. + + To compile this driver as a module, choose M here. The module + will be called idpf. 
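The ibmvnic hunk above (and the e100/e1000/fm10k hunks below) swap strncpy() for strscpy(), which always NUL-terminates and takes the full destination size, so the "sizeof(...) - 1" adjustment disappears. A tiny sketch of the idiom, with a placeholder foo_set_name():

#include <linux/string.h>

static void foo_set_name(char *dst, size_t dst_size, const char *src)
{
        /* returns the number of bytes copied, or -E2BIG on truncation */
        strscpy(dst, src, dst_size);
}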
endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index d80d041320..dacb481ee5 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ +obj-$(CONFIG_IDPF) += idpf/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index d3fdc29093..01f0f12035 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2841,7 +2841,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &e100_netdev_ops; netdev->ethtool_ops = &e100_ethtool_ops; netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); nic = netdev_priv(netdev); netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 4542e2bc28..4576511c99 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -5,6 +5,7 @@ * Shared functions for accessing and configuring the MAC */ +#include #include "e1000.h" static s32 e1000_check_downshift(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index da6e303ad9..1d1e93686a 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1014,7 +1014,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000_clean); - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); adapter->bd_number = cards_found; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index a187582d22..ba9c19e699 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) * bits to count nanoseconds leaving the rest for fractional nonseconds. + * + * Any given INCVALUE also has an associated maximum adjustment value. This + * maximum adjustment value is the largest increase (or decrease) which can be + * safely applied without overflowing the INCVALUE. Since INCVALUE has + * a maximum range of 24 bits, its largest value is 0xFFFFFF. 
+ * + * To understand where the maximum value comes from, consider the following + * equation: + * + * new_incval = base_incval + (base_incval * adjustment) / 1billion + * + * To avoid overflow that means: + * max_incval = base_incval + (base_incval * max_adj) / billion + * + * Re-arranging: + * max_adj = floor(((max_incval - base_incval) * 1billion) / 1billion) */ #define INCVALUE_96MHZ 125 #define INCVALUE_SHIFT_96MHZ 17 #define INCPERIOD_SHIFT_96MHZ 2 #define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ) +#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */ #define INCVALUE_25MHZ 40 #define INCVALUE_SHIFT_25MHZ 18 #define INCPERIOD_25MHZ 1 +#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */ #define INCVALUE_24MHZ 125 #define INCVALUE_SHIFT_24MHZ 14 #define INCPERIOD_24MHZ 3 +#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */ #define INCVALUE_38400KHZ 26 #define INCVALUE_SHIFT_38400KHZ 19 #define INCPERIOD_38400KHZ 1 +#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */ /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 02d871bc11..bbcfd52939 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + break; case e1000_pch_lpt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; + break; case e1000_pch_spt: + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + break; case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: @@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: - if ((hw->mac.type < e1000_pch_lpt) || - (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { - adapter->ptp_clock_info.max_adj = 24000000 - 1; - break; - } - fallthrough; + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; + break; case e1000_82574: case e1000_82583: - adapter->ptp_clock_info.max_adj = 600000000 - 1; + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; break; default: break; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index d53369e300..13a05604dc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -448,10 +448,10 @@ static void fm10k_get_drvinfo(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); - strncpy(info->driver, fm10k_driver_name, - sizeof(info->driver) - 1); - strncpy(info->bus_info, pci_name(interface->pdev), - sizeof(info->bus_info) - 1); + strscpy(info->driver, fm10k_driver_name, + sizeof(info->driver)); + strscpy(info->bus_info, pci_name(interface->pdev), + sizeof(info->bus_info)); } static void fm10k_get_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index af1b0cde36..ae700a1807 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 
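Solving the overflow condition stated in the e1000e INCVALUE comment above for max_adj gives the bound below; note that the final division is by base_incval (the comment's last line reads as if it divided by one billion):

\[
\text{max\_incval} = \text{base\_incval} + \frac{\text{base\_incval}\cdot\text{max\_adj}}{10^{9}}
\;\Longrightarrow\;
\text{max\_adj} = \left\lfloor \frac{(\text{max\_incval}-\text{base\_incval})\cdot 10^{9}}{\text{base\_incval}} \right\rfloor
\]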
2013 - 2019 Intel Corporation. */ +#include #include "fm10k_pf.h" #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index dc8ccd378e..c50928ec14 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2019 Intel Corporation. */ +#include #include "fm10k_vf.h" /** diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 2f21b3e89f..cad93f323b 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -24,6 +24,7 @@ i40e-objs := i40e_main.o \ i40e_ddp.o \ i40e_client.o \ i40e_virtchnl_pf.o \ - i40e_xsk.o + i40e_xsk.o \ + i40e_devlink.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 55bb0b5310..1bf424ac31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -4,47 +4,21 @@ #ifndef _I40E_H_ #define _I40E_H_ -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include +#include +#include #include -#include -#include -#include #include -#include -#include -#include "i40e_type.h" +#include "i40e_dcb.h" +#include "i40e_debug.h" +#include "i40e_devlink.h" +#include "i40e_io.h" #include "i40e_prototype.h" -#include -#include -#include "i40e_virtchnl_pf.h" +#include "i40e_register.h" #include "i40e_txrx.h" -#include "i40e_dcb.h" /* Useful i40e defaults */ #define I40E_MAX_VEB 16 @@ -75,23 +49,19 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) -#define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 12 -#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) -#define I40E_OEM_VER_BUILD_MASK 0xffff -#define I40E_OEM_VER_PATCH_MASK 0xff -#define I40E_OEM_VER_BUILD_SHIFT 8 -#define I40E_OEM_VER_SHIFT 24 #define I40E_PHY_DEBUG_ALL \ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) #define I40E_OEM_EETRACK_ID 0xffffffff -#define I40E_OEM_GEN_SHIFT 24 -#define I40E_OEM_SNAP_MASK 0x00ff0000 -#define I40E_OEM_SNAP_SHIFT 16 -#define I40E_OEM_RELEASE_MASK 0x0000ffff +#define I40E_NVM_VERSION_LO_MASK GENMASK(7, 0) +#define I40E_NVM_VERSION_HI_MASK GENMASK(15, 12) +#define I40E_OEM_VER_BUILD_MASK GENMASK(23, 8) +#define I40E_OEM_VER_PATCH_MASK GENMASK(7, 0) +#define I40E_OEM_VER_MASK GENMASK(31, 24) +#define I40E_OEM_GEN_MASK GENMASK(31, 24) +#define I40E_OEM_SNAP_MASK GENMASK(23, 16) +#define I40E_OEM_RELEASE_MASK GENMASK(15, 0) #define I40E_RX_DESC(R, i) \ (&(((union i40e_rx_desc *)((R)->desc))[i])) @@ -323,29 +293,6 @@ struct i40e_udp_port_config { u8 filter_index; }; -#define I40_DDP_FLASH_REGION 100 -#define I40E_PROFILE_INFO_SIZE 48 -#define I40E_MAX_PROFILE_NUM 16 -#define I40E_PROFILE_LIST_SIZE \ - (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) -#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" -#define I40E_DDP_PROFILE_NAME_MAX 64 - -int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, - bool is_add); -int i40e_ddp_flash(struct net_device *netdev, struct 
ethtool_flash *flash); - -struct i40e_ddp_profile_list { - u32 p_count; - struct i40e_profile_info p_info[]; -}; - -struct i40e_ddp_old_profile_list { - struct list_head list; - size_t old_ddp_size; - u8 old_ddp_buf[]; -}; - /* macros related to FLX_PIT */ #define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ @@ -462,6 +409,7 @@ static inline const u8 *i40e_channel_mac(struct i40e_channel *ch) /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; + struct devlink_port devlink_port; struct i40e_hw hw; DECLARE_BITMAP(state, __I40E_STATE_SIZE__); struct msix_entry *msix_entries; @@ -1002,43 +950,104 @@ struct i40e_device { }; /** - * i40e_nvm_version_str - format the NVM version strings + * i40e_info_nvm_ver - format the NVM version string * @hw: ptr to the hardware info + * @buf: string buffer to store + * @len: buffer size + * + * Formats NVM version string as: + * .. when eetrackid == I40E_OEM_EETRACK_ID + * . otherwise **/ -static inline char *i40e_nvm_version_str(struct i40e_hw *hw) +static inline void i40e_info_nvm_ver(struct i40e_hw *hw, char *buf, size_t len) { - static char buf[32]; - u32 full_ver; + struct i40e_nvm_info *nvm = &hw->nvm; - full_ver = hw->nvm.oem_ver; - - if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) { + if (nvm->eetrack == I40E_OEM_EETRACK_ID) { + u32 full_ver = nvm->oem_ver; u8 gen, snap; u16 release; - gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT); - snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >> - I40E_OEM_SNAP_SHIFT); - release = (u16)(full_ver & I40E_OEM_RELEASE_MASK); - - snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release); + gen = FIELD_GET(I40E_OEM_GEN_MASK, full_ver); + snap = FIELD_GET(I40E_OEM_SNAP_MASK, full_ver); + release = FIELD_GET(I40E_OEM_RELEASE_MASK, full_ver); + snprintf(buf, len, "%x.%x.%x", gen, snap, release); } else { - u8 ver, patch; + u8 major, minor; + + major = FIELD_GET(I40E_NVM_VERSION_HI_MASK, nvm->version); + minor = FIELD_GET(I40E_NVM_VERSION_LO_MASK, nvm->version); + snprintf(buf, len, "%x.%02x", major, minor); + } +} + +/** + * i40e_info_eetrack - format the EETrackID string + * @hw: ptr to the hardware info + * @buf: string buffer to store + * @len: buffer size + * + * Returns hexadecimally formated EETrackID if it is + * different from I40E_OEM_EETRACK_ID or empty string. + **/ +static inline void i40e_info_eetrack(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + + buf[0] = '\0'; + if (nvm->eetrack != I40E_OEM_EETRACK_ID) + snprintf(buf, len, "0x%08x", nvm->eetrack); +} + +/** + * i40e_info_civd_ver - format the NVM version strings + * @hw: ptr to the hardware info + * @buf: string buffer to store + * @len: buffer size + * + * Returns formated combo image version if adapter's EETrackID is + * different from I40E_OEM_EETRACK_ID or empty string. 
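[Editorial aside, illustrative only and not part of the patch: a small standalone sketch of how the GENMASK()/FIELD_GET() conversion used in i40e_info_nvm_ver() above pulls the major/minor numbers out of nvm->version. The helpers below are simplified stand-ins for the kernel macros, and the version value 0x6321 is made up.]

#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_GET() helpers. */
#define GENMASK(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & ~((mask) << 1)))

int main(void)
{
	unsigned int version = 0x6321;  /* made-up nvm->version value */
	unsigned int major = FIELD_GET(GENMASK(15, 12), version);
	unsigned int minor = FIELD_GET(GENMASK(7, 0), version);

	printf("%x.%02x\n", major, minor);  /* prints "6.21" */
	return 0;
}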
+ **/ +static inline void i40e_info_civd_ver(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + + buf[0] = '\0'; + if (nvm->eetrack != I40E_OEM_EETRACK_ID) { + u32 full_ver = nvm->oem_ver; + u8 major, minor; u16 build; - ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); - build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) & - I40E_OEM_VER_BUILD_MASK); - patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); - - snprintf(buf, sizeof(buf), - "%x.%02x 0x%x %d.%d.%d", - (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> - I40E_NVM_VERSION_HI_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> - I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack, ver, build, patch); + major = FIELD_GET(I40E_OEM_VER_MASK, full_ver); + build = FIELD_GET(I40E_OEM_VER_BUILD_MASK, full_ver); + minor = FIELD_GET(I40E_OEM_VER_PATCH_MASK, full_ver); + snprintf(buf, len, "%d.%d.%d", major, build, minor); } +} + +/** + * i40e_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + * @buf: string buffer to store + * @len: buffer size + **/ +static inline char *i40e_nvm_version_str(struct i40e_hw *hw, char *buf, + size_t len) +{ + char ver[16] = " "; + + /* Get NVM version */ + i40e_info_nvm_ver(hw, buf, len); + + /* Append EETrackID if provided */ + i40e_info_eetrack(hw, &ver[1], sizeof(ver) - 1); + if (strlen(ver) > 1) + strlcat(buf, ver, len); + + /* Append combo image version if provided */ + i40e_info_civd_ver(hw, &ver[1], sizeof(ver) - 1); + if (strlen(ver) > 1) + strlcat(buf, ver, len); return buf; } @@ -1321,4 +1330,15 @@ static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) return pf->flags & I40E_FLAG_TC_MQPRIO; } +/** + * i40e_hw_to_pf - get pf pointer from the hardware structure + * @hw: pointer to the device HW structure + **/ +static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw) +{ + return container_of(hw, struct i40e_pf, hw); +} + +struct device *i40e_hw_to_dev(struct i40e_hw *hw); + #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 100eb77b8d..9ce6e633cc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include "i40e_type.h" +#include +#include "i40e_alloc.h" #include "i40e_register.h" -#include "i40e_adminq.h" #include "i40e_prototype.h" static void i40e_resume_aq(struct i40e_hw *hw); @@ -51,7 +51,6 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, - i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); @@ -78,7 +77,6 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, - i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); @@ -136,7 +134,6 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw) for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_arq_buf, hw->aq.arq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) @@ -198,7 +195,6 @@ static int i40e_alloc_asq_bufs(struct i40e_hw *hw) for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_asq_buf, hw->aq.asq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 267f2e0a21..80125bea80 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -4,7 +4,8 @@ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ -#include "i40e_osdep.h" +#include +#include "i40e_alloc.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 3357d65a90..18a1c3b6d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -4,6 +4,8 @@ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ +#include + /* This header file defines the i40e Admin Queue commands and is shared between * i40e Firmware and Software. 
* diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index a6c9a9e343..e0dde32625 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -4,25 +4,25 @@ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ +#include + struct i40e_hw; -/* Memory allocation types */ -enum i40e_memory_type { - i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ - i40e_mem_asq_buf = 1, - i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ - i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ - i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ - i40e_mem_pd = 5, /* Page Descriptor */ - i40e_mem_bp = 6, /* Backing Page - 4KB */ - i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ - i40e_mem_reserved +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +struct i40e_virt_mem { + void *va; + u32 size; }; /* prototype for functions used for dynamic memory allocation */ int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, - enum i40e_memory_type type, u64 size, u32 alignment); int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 639c5a1ca8..306758428a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -6,7 +6,6 @@ #include #include "i40e.h" -#include "i40e_prototype.h" static LIST_HEAD(i40e_devices); static DEFINE_MUTEX(i40e_device_mutex); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 1b493854f5..3eb6564c1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,11 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ -#include "i40e.h" -#include "i40e_type.h" -#include "i40e_adminq.h" -#include "i40e_prototype.h" #include +#include +#include +#include +#include +#include "i40e_adminq_cmd.h" +#include "i40e_devids.h" +#include "i40e_prototype.h" +#include "i40e_register.h" /** * i40e_set_mac_type - Sets MAC type @@ -818,62 +822,72 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) } /** - * i40e_read_pba_string - Reads part number string from EEPROM + * i40e_get_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length * - * Reads the part number string from the EEPROM. + * Reads the part number string from the EEPROM and stores it + * into newly allocated buffer and saves resulting pointer + * to i40e_hw->pba_id field. 
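[Editorial aside, illustrative only and not part of the patch: a minimal standalone sketch of the byte unpacking performed by the reworked i40e_get_pba_string() below, where each 16-bit NVM word carries two ASCII characters, high byte first. The word values here are made up.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up PBA block payload (the words following the size word). */
	uint16_t pba_words[] = { 0x4735, 0x3830, 0x3438, 0x3030 };
	size_t nwords = sizeof(pba_words) / sizeof(pba_words[0]);
	char pba_id[2 * 4 + 1];
	size_t i;

	for (i = 0; i < nwords; i++) {
		pba_id[2 * i] = (pba_words[i] >> 8) & 0xFF;      /* high byte first */
		pba_id[2 * i + 1] = pba_words[i] & 0xFF;
	}
	pba_id[2 * nwords] = '\0';

	printf("PBA ID: %s\n", pba_id);  /* prints "PBA ID: G5804800" */
	return 0;
}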
**/ -int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size) +void i40e_get_pba_string(struct i40e_hw *hw) { +#define I40E_NVM_PBA_FLAGS_BLK_PRESENT 0xFAFA u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; - int status = 0; - u16 i = 0; + int status; + char *ptr; + u16 i; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); - if (status || (pba_word != 0xFAFA)) { - hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); - return status; + if (status) { + hw_dbg(hw, "Failed to read PBA flags.\n"); + return; + } + if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) { + hw_dbg(hw, "PBA block is not present.\n"); + return; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status) { hw_dbg(hw, "Failed to read PBA Block pointer.\n"); - return status; + return; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status) { hw_dbg(hw, "Failed to read PBA Block size.\n"); - return status; + return; } /* Subtract one to get PBA word count (PBA Size word is included in - * total size) + * total size) and advance pointer to first PBA word. */ pba_size--; - if (pba_num_size < (((u32)pba_size * 2) + 1)) { - hw_dbg(hw, "Buffer too small for PBA data.\n"); - return -EINVAL; + pba_ptr++; + if (!pba_size) { + hw_dbg(hw, "PBA ID is empty.\n"); + return; } + ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL); + if (!ptr) + return; + hw->pba_id = ptr; + for (i = 0; i < pba_size; i++) { - status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word); if (status) { hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); - return status; + devm_kfree(i40e_hw_to_dev(hw), hw->pba_id); + hw->pba_id = NULL; + return; } - pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; - pba_num[(i * 2) + 1] = pba_word & 0xFF; + *ptr++ = (pba_word >> 8) & 0xFF; + *ptr++ = pba_word & 0xFF; } - pba_num[(pba_size * 2)] = '\0'; - - return status; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index f81e744c0f..d57dd30b02 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,9 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ +#include #include "i40e_adminq.h" -#include "i40e_prototype.h" +#include "i40e_alloc.h" #include "i40e_dcb.h" +#include "i40e_prototype.h" /** * i40e_get_dcbx_status diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 195421d863..077a95dad3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -2,8 +2,8 @@ /* Copyright(c) 2013 - 2021 Intel Corporation. */ #ifdef CONFIG_I40E_DCB -#include "i40e.h" #include +#include "i40e.h" #define I40E_DCBNL_STATUS_SUCCESS 0 #define I40E_DCBNL_STATUS_ERROR 1 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 0e72abd178..cf25bfc5dc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -1,9 +1,27 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ +#include #include "i40e.h" -#include +#define I40_DDP_FLASH_REGION 100 +#define I40E_PROFILE_INFO_SIZE 48 +#define I40E_MAX_PROFILE_NUM 16 +#define I40E_PROFILE_LIST_SIZE \ + (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) +#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" +#define I40E_DDP_PROFILE_NAME_MAX 64 + +struct i40e_ddp_profile_list { + u32 p_count; + struct i40e_profile_info p_info[]; +}; + +struct i40e_ddp_old_profile_list { + struct list_head list; + size_t old_ddp_size; + u8 old_ddp_buf[]; +}; /** * i40e_ddp_profiles_eq - checks if DDP profiles are the equivalent @@ -261,8 +279,8 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, * Checks correctness and loads DDP profile to the NIC. The function is * also used for rolling back previously loaded profile. **/ -int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, - bool is_add) +static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, + bool is_add) { u8 profile_info_sec[sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info)]; @@ -438,10 +456,9 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) char profile_name[sizeof(I40E_DDP_PROFILE_PATH) + I40E_DDP_PROFILE_NAME_MAX]; - profile_name[sizeof(profile_name) - 1] = 0; - strncpy(profile_name, I40E_DDP_PROFILE_PATH, - sizeof(profile_name) - 1); - strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX); + scnprintf(profile_name, sizeof(profile_name), "%s%s", + I40E_DDP_PROFILE_PATH, flash->data); + /* Load DDP recipe. */ status = request_firmware(&ddp_config, profile_name, &netdev->dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h new file mode 100644 index 0000000000..27ebc72d8b --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2023 Intel Corporation. */ + +#ifndef _I40E_DEBUG_H_ +#define _I40E_DEBUG_H_ + +#include + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, + I40E_DEBUG_IWARP = 0x00F00000, + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +struct i40e_hw; +struct device *i40e_hw_to_dev(struct i40e_hw *hw); + +#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A) + +#define i40e_debug(h, m, s, ...) 
\ +do { \ + if (((m) & (h)->debug_mask)) \ + dev_info(i40e_hw_to_dev(hw), s, ##__VA_ARGS__); \ +} while (0) + +#endif /* _I40E_DEBUG_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 1a497cb077..999c9708de 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -5,8 +5,9 @@ #include #include - +#include #include "i40e.h" +#include "i40e_virtchnl_pf.h" static struct dentry *i40e_dbg_root; diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c new file mode 100644 index 0000000000..cc4e9e2add --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2023 Intel Corporation. */ + +#include +#include "i40e.h" +#include "i40e_devlink.h" + +static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len) +{ + u8 dsn[8]; + + put_unaligned_be64(pci_get_dsn(pf->pdev), dsn); + + snprintf(buf, len, "%8phD", dsn); +} + +static void i40e_info_fw_mgmt(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%u.%u", aq->fw_maj_ver, aq->fw_min_ver); +} + +static void i40e_info_fw_mgmt_build(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%05d", aq->fw_build); +} + +static void i40e_info_fw_api(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%u.%u", aq->api_maj_ver, aq->api_min_ver); +} + +static void i40e_info_pba(struct i40e_hw *hw, char *buf, size_t len) +{ + buf[0] = '\0'; + if (hw->pba_id) + strscpy(buf, hw->pba_id, len); +} + +enum i40e_devlink_version_type { + I40E_DL_VERSION_FIXED, + I40E_DL_VERSION_RUNNING, +}; + +static int i40e_devlink_info_put(struct devlink_info_req *req, + enum i40e_devlink_version_type type, + const char *key, const char *value) +{ + if (!strlen(value)) + return 0; + + switch (type) { + case I40E_DL_VERSION_FIXED: + return devlink_info_version_fixed_put(req, key, value); + case I40E_DL_VERSION_RUNNING: + return devlink_info_version_running_put(req, key, value); + } + return 0; +} + +static int i40e_devlink_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct i40e_pf *pf = devlink_priv(dl); + struct i40e_hw *hw = &pf->hw; + char buf[32]; + int err; + + i40e_info_get_dsn(pf, buf, sizeof(buf)); + err = devlink_info_serial_number_put(req, buf); + if (err) + return err; + + i40e_info_fw_mgmt(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf); + if (err) + return err; + + i40e_info_fw_mgmt_build(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + "fw.mgmt.build", buf); + if (err) + return err; + + i40e_info_fw_api(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + buf); + if (err) + return err; + + i40e_info_nvm_ver(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + "fw.psid.api", buf); + if (err) + return err; + + i40e_info_eetrack(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + buf); + if (err) + return err; + + i40e_info_civd_ver(hw, buf, sizeof(buf)); + err = 
i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, buf); + if (err) + return err; + + i40e_info_pba(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, buf); + + return err; +} + +static const struct devlink_ops i40e_devlink_ops = { + .info_get = i40e_devlink_info_get, +}; + +/** + * i40e_alloc_pf - Allocate devlink and return i40e_pf structure pointer + * @dev: the device to allocate for + * + * Allocate a devlink instance for this device and return the private + * area as the i40e_pf structure. + **/ +struct i40e_pf *i40e_alloc_pf(struct device *dev) +{ + struct devlink *devlink; + + devlink = devlink_alloc(&i40e_devlink_ops, sizeof(struct i40e_pf), dev); + if (!devlink) + return NULL; + + return devlink_priv(devlink); +} + +/** + * i40e_free_pf - Free i40e_pf structure and associated devlink + * @pf: the PF structure + * + * Free i40e_pf structure and devlink allocated by devlink_alloc. + **/ +void i40e_free_pf(struct i40e_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + + devlink_free(devlink); +} + +/** + * i40e_devlink_register - Register devlink interface for this PF + * @pf: the PF to register the devlink for. + * + * Register the devlink instance associated with this physical function. + **/ +void i40e_devlink_register(struct i40e_pf *pf) +{ + devlink_register(priv_to_devlink(pf)); +} + +/** + * i40e_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup + * + * Releases resources used by devlink and cleans up associated memory. + **/ +void i40e_devlink_unregister(struct i40e_pf *pf) +{ + devlink_unregister(priv_to_devlink(pf)); +} + +/** + * i40e_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void i40e_devlink_set_switch_id(struct i40e_pf *pf, + struct netdev_phys_item_id *ppid) +{ + u64 id = pci_get_dsn(pf->pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * i40e_devlink_create_port - Create a devlink port for this PF + * @pf: the PF to create a port for + * + * Create and register a devlink_port for this PF. Note that although each + * physical function is connected to a separate devlink instance, the port + * will still be numbered according to the physical function id. + * + * Return: zero on success or an error code on failure. + **/ +int i40e_devlink_create_port(struct i40e_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct devlink_port_attrs attrs = {}; + struct device *dev = &pf->pdev->dev; + int err; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.pf_id; + i40e_devlink_set_switch_id(pf, &attrs.switch_id); + devlink_port_attrs_set(&pf->devlink_port, &attrs); + err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id); + if (err) { + dev_err(dev, "devlink_port_register failed: %d\n", err); + return err; + } + + return 0; +} + +/** + * i40e_devlink_destroy_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. 
+ **/ +void i40e_devlink_destroy_port(struct i40e_pf *pf) +{ + devlink_port_unregister(&pf->devlink_port); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.h b/drivers/net/ethernet/intel/i40e/i40e_devlink.h new file mode 100644 index 0000000000..469fb3d2ee --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023, Intel Corporation. */ + +#ifndef _I40E_DEVLINK_H_ +#define _I40E_DEVLINK_H_ + +#include + +struct i40e_pf; + +struct i40e_pf *i40e_alloc_pf(struct device *dev); +void i40e_free_pf(struct i40e_pf *pf); +void i40e_devlink_register(struct i40e_pf *pf); +void i40e_devlink_unregister(struct i40e_pf *pf); +int i40e_devlink_create_port(struct i40e_pf *pf); +void i40e_devlink_destroy_port(struct i40e_pf *pf); + +#endif /* _I40E_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3ce5f3521..ece3a6b9a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -4,7 +4,10 @@ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ -#include "i40e_type.h" +#include "i40e_adminq_cmd.h" + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; enum i40e_lb_mode { I40E_LB_MODE_NONE = 0x0, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index bd1321bf7e..fd7163128c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -3,9 +3,10 @@ /* ethtool support for i40e */ -#include "i40e.h" +#include "i40e_devids.h" #include "i40e_diag.h" #include "i40e_txrx_common.h" +#include "i40e_virtchnl_pf.h" /* ethtool statistics helpers */ @@ -245,6 +246,7 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_errors), I40E_NETDEV_STAT(tx_errors), I40E_NETDEV_STAT(rx_dropped), + I40E_NETDEV_STAT(rx_missed_errors), I40E_NETDEV_STAT(tx_dropped), I40E_NETDEV_STAT(collisions), I40E_NETDEV_STAT(rx_length_errors), @@ -321,7 +323,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors), - I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("port.rx_discards", stats.eth.rx_discards), I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors), I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes), @@ -2004,8 +2006,8 @@ static void i40e_get_drvinfo(struct net_device *netdev, struct i40e_pf *pf = vsi->back; strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); - strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), - sizeof(drvinfo->fw_version)); + i40e_nvm_version_str(&pf->hw, drvinfo->fw_version, + sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; @@ -2512,11 +2514,13 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) u8 *p = data; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string); + ethtool_sprintf(&p, "%s", + i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, 
i40e_gl_gstrings_priv_flags[i].flag_string); + ethtool_sprintf(&p, "%s", + i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 96ee63aca7..1742624ca6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,10 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include "i40e.h" -#include "i40e_osdep.h" -#include "i40e_register.h" #include "i40e_alloc.h" +#include "i40e_debug.h" #include "i40e_hmc.h" #include "i40e_type.h" @@ -22,7 +20,6 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw, enum i40e_sd_entry_type type, u64 direct_mode_sz) { - enum i40e_memory_type mem_type __attribute__((unused)); struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; @@ -43,16 +40,13 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw, sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; if (!sd_entry->valid) { - if (I40E_SD_TYPE_PAGED == type) { - mem_type = i40e_mem_pd; + if (type == I40E_SD_TYPE_PAGED) alloc_len = I40E_HMC_PAGED_BP_SIZE; - } else { - mem_type = i40e_mem_bp_jumbo; + else alloc_len = direct_mode_sz; - } /* allocate a 4K pd page or 2M backing page */ - ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + ret_code = i40e_allocate_dma_mem(hw, &mem, alloc_len, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; @@ -140,7 +134,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw, page = rsrc_pg; } else { /* allocate a 4K backing page */ - ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + ret_code = i40e_allocate_dma_mem(hw, page, I40E_HMC_PAGED_BP_SIZE, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 9960da07a5..480e3a883c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -4,6 +4,10 @@ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ +#include "i40e_alloc.h" +#include "i40e_io.h" +#include "i40e_register.h" + #define I40E_HMC_MAX_BP_COUNT 512 /* forward-declare the HW struct for the compiler */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_io.h b/drivers/net/ethernet/intel/i40e/i40e_io.h new file mode 100644 index 0000000000..2a2ed9a1d4 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_io.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2023 Intel Corporation. */ + +#ifndef _I40E_IO_H_ +#define _I40E_IO_H_ + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +#endif /* _I40E_IO_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 474365bf06..beaaf5c309 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,13 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include "i40e.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_type.h" -#include "i40e_hmc.h" +#include "i40e_alloc.h" +#include "i40e_debug.h" #include "i40e_lan_hmc.h" -#include "i40e_prototype.h" +#include "i40e_type.h" /* lan specific interface functions */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index 9f960404c2..305a276953 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -4,6 +4,8 @@ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ +#include "i40e_hmc.h" + /* forward-declare the HW struct for the compiler */ struct i40e_hw; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index aad39ebff4..d9716bcec8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,19 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include /* Local includes */ #include "i40e.h" +#include "i40e_devids.h" #include "i40e_diag.h" +#include "i40e_lan_hmc.h" +#include "i40e_virtchnl_pf.h" #include "i40e_xsk.h" -#include -#include + /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -126,16 +129,27 @@ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, } /** - * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code + * i40e_hw_to_dev - get device pointer from the hardware structure + * @hw: pointer to the device HW structure + **/ +struct device *i40e_hw_to_dev(struct i40e_hw *hw) +{ + struct i40e_pf *pf = i40e_hw_to_pf(hw); + + return &pf->pdev->dev; +} + +/** + * i40e_allocate_dma_mem - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ -int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, - u64 size, u32 alignment) +int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, + u64 size, u32 alignment) { - struct i40e_pf *pf = (struct i40e_pf *)hw->back; + struct i40e_pf *pf = i40e_hw_to_pf(hw); mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, @@ -147,13 +161,13 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, } /** - * i40e_free_dma_mem_d - OS specific memory free for shared code + * i40e_free_dma_mem - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem) { - struct i40e_pf *pf = (struct i40e_pf *)hw->back; + struct i40e_pf *pf = i40e_hw_to_pf(hw); dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); mem->va = NULL; @@ -164,13 +178,13 @@ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) } /** - * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code + * i40e_allocate_virt_mem - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -int i40e_allocate_virt_mem_d(struct i40e_hw *hw, 
struct i40e_virt_mem *mem, - u32 size) +int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, + u32 size) { mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); @@ -182,11 +196,11 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, } /** - * i40e_free_virt_mem_d - OS specific memory free for shared code + * i40e_free_virt_mem - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) +int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem) { /* it's ok to kfree a NULL pointer */ kfree(mem->va); @@ -495,6 +509,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; stats->rx_dropped = vsi_stats->rx_dropped; + stats->rx_missed_errors = vsi_stats->rx_missed_errors; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; } @@ -686,17 +701,13 @@ i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, struct i40e_eth_stats *stat_offset, struct i40e_eth_stats *stat) { - u64 rx_rdpc, rx_rxerr; - i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, - &stat_offset->rx_discards, &rx_rdpc); + &stat_offset->rx_discards, &stat->rx_discards); i40e_stat_update64(hw, I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), offset_loaded, &stat_offset->rx_discards_other, - &rx_rxerr); - - stat->rx_discards = rx_rdpc + rx_rxerr; + &stat->rx_discards_other); } /** @@ -718,9 +729,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); - i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), - vsi->stat_offsets_loaded, - &oes->rx_discards, &es->rx_discards); i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); @@ -977,8 +985,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) ns->tx_errors = es->tx_errors; ons->multicast = oes->rx_multicast; ns->multicast = es->rx_multicast; - ons->rx_dropped = oes->rx_discards; - ns->rx_dropped = es->rx_discards; + ons->rx_dropped = oes->rx_discards_other; + ns->rx_dropped = es->rx_discards_other; + ons->rx_missed_errors = oes->rx_discards; + ns->rx_missed_errors = es->rx_discards; ons->tx_dropped = oes->tx_discards; ns->tx_dropped = es->tx_discards; @@ -5351,7 +5361,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) { ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) @@ -10809,7 +10819,9 @@ static void i40e_get_oem_version(struct i40e_hw *hw) &gen_snap); i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release); - hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; + hw->nvm.oem_ver = + FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) | + FIELD_PREP(I40E_OEM_RELEASE_MASK, release); hw->nvm.eetrack = I40E_OEM_EETRACK_ID; } @@ -14222,6 +14234,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) } set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; + if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; @@ -14235,6 +14248,9 @@ int 
i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); + spin_lock_bh(&vsi->mac_filter_hash_lock); /* clear the sync flag on all filters */ @@ -14415,6 +14431,8 @@ err_rings: free_netdev(vsi->netdev); vsi->netdev = NULL; } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); @@ -14555,9 +14573,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; + if (vsi->type == I40E_VSI_MAIN) { + ret = i40e_devlink_create_port(pf); + if (ret) + goto err_netdev; + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); + } ret = register_netdev(vsi->netdev); if (ret) - goto err_netdev; + goto err_dl_port; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB @@ -14600,6 +14624,9 @@ err_msix: free_netdev(vsi->netdev); vsi->netdev = NULL; } +err_dl_port: + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: @@ -15630,7 +15657,7 @@ err_switch_setup: iounmap(hw->hw_addr); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); - kfree(pf); + i40e_free_pf(pf); return err; } @@ -15644,10 +15671,10 @@ err_switch_setup: **/ static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) { - struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; + struct i40e_pf *pf = i40e_hw_to_pf(hw); - hw->subsystem_device_id = pdev->subsystem_device ? - pdev->subsystem_device : + hw->subsystem_device_id = pf->pdev->subsystem_device ? + pf->pdev->subsystem_device : (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); } @@ -15672,6 +15699,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; + char nvm_ver[32]; u16 link_status; #ifdef CONFIG_I40E_DCB int status; @@ -15707,7 +15735,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * the Admin Queue structures and then querying for the * device's current profile information. 
*/ - pf = kzalloc(sizeof(*pf), GFP_KERNEL); + pf = i40e_alloc_pf(&pdev->dev); if (!pf) { err = -ENOMEM; goto err_pf_alloc; @@ -15717,7 +15745,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; - hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); @@ -15842,13 +15869,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } i40e_get_oem_version(hw); + i40e_get_pba_string(hw); /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ + i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver)); dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, - hw->aq.api_maj_ver, hw->aq.api_min_ver, - i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, - hw->subsystem_vendor_id, hw->subsystem_device_id); + hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver, + hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, + hw->subsystem_device_id); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) @@ -16235,6 +16264,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* print a string summarizing features */ i40e_print_features(pf); + i40e_devlink_register(pf); + return 0; /* Unwind what we've done if something failed in the setup */ @@ -16255,7 +16286,7 @@ err_adminq_setup: err_pf_reset: iounmap(hw->hw_addr); err_ioremap: - kfree(pf); + i40e_free_pf(pf); err_pf_alloc: pci_release_mem_regions(pdev); err_pci_reg: @@ -16280,6 +16311,8 @@ static void i40e_remove(struct pci_dev *pdev) int ret_code; int i; + i40e_devlink_unregister(pf); + i40e_dbg_pf_exit(pf); i40e_ptp_stop(pf); @@ -16405,7 +16438,7 @@ unmap: kfree(pf->vsi); iounmap(hw->hw_addr); - kfree(pf); + i40e_free_pf(pf); pci_release_mem_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 07a46adeab..e5aec09d58 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,6 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include +#include +#include "i40e_alloc.h" #include "i40e_prototype.h" /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h deleted file mode 100644 index 2bd4de03da..0000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_OSDEP_H_ -#define _I40E_OSDEP_H_ - -#include -#include -#include -#include -#include -#include - -/* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include - -/* File to be the magic between shared code and - * actual OS primitives - */ - -#define hw_dbg(hw, S, A...) 
\ -do { \ - dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ -} while (0) - -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) - -#define rd64(a, reg) readq((a)->hw_addr + (reg)) -#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) - -/* memory allocation tracking */ -struct i40e_dma_mem { - void *va; - dma_addr_t pa; - u32 size; -}; - -#define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40e_allocate_dma_mem_d(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) - -struct i40e_virt_mem { - void *va; - u32 size; -}; - -#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) -#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) - -#define i40e_debug(h, m, s, ...) \ -do { \ - if (((m) & (h)->debug_mask)) \ - pr_info("i40e %02x:%02x.%x " s, \ - (h)->bus.bus_id, (h)->bus.device, \ - (h)->bus.func, ##__VA_ARGS__); \ -} while (0) - -#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 3eeee224f1..0011620420 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -4,9 +4,10 @@ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ -#include "i40e_type.h" -#include "i40e_alloc.h" +#include #include +#include "i40e_debug.h" +#include "i40e_type.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. These are @@ -340,8 +341,7 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size); +void i40e_get_pba_string(struct i40e_hw *hw); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); /* prototype for functions used for NVM access */ int i40e_init_nvm(struct i40e_hw *hw); @@ -497,4 +497,8 @@ int i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id); + +/* i40e_ddp */ +int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); + #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 8a26811140..20b77398f0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include "i40e.h" #include #include +#include "i40e.h" +#include "i40e_devids.h" /* The XL710 timesync is very much like Intel's 82599 design when it comes to * the fundamental clock design. 
However, the clock operations are much simpler diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 7339003aa1..f6671ac797 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -4,6 +4,9 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) + #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ @@ -202,7 +205,9 @@ #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1df2f93388..071ef309a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include #include +#include +#include #include #include -#include "i40e.h" -#include "i40e_trace.h" -#include "i40e_prototype.h" #include "i40e_txrx_common.h" +#include "i40e_trace.h" #include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) @@ -2402,7 +2401,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring, void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) { if (xdp_res & I40E_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & I40E_XDP_TX) { struct i40e_ring *xdp_ring = diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 900b0d9ede..421fe56755 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -5,6 +5,7 @@ #define _I40E_TXRX_H_ #include +#include "i40e_type.h" /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_DEFAULT_IRQ_WORK 256 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h index 8c5118c8ba..e26807fd21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -4,6 +4,8 @@ #ifndef I40E_TXRX_COMMON_ #define I40E_TXRX_COMMON_ +#include "i40e.h" + int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, u64 qword1); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 232131bedc..f95bc2a4a8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -4,15 +4,9 @@ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ -#include "i40e_osdep.h" -#include "i40e_register.h" +#include #include "i40e_adminq.h" #include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << 
(shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 4 @@ -43,48 +37,14 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - I40E_DEBUG_IWARP = 0x00F00000, - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_STCODE_SHIFT) -#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) - -#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \ - I40E_GLGEN_MSCA_STCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1) +#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) +#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2) + +#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0) +#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0) +#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) +#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3) #define I40E_PHY_COM_REG_PAGE 0x1E #define I40E_PHY_LED_LINK_MODE_MASK 0xF0 @@ -525,7 +485,6 @@ struct i40e_dcbx_config { /* Port hardware description */ struct i40e_hw { u8 __iomem *hw_addr; - void *back; /* subsystem structs */ struct i40e_phy_info phy; @@ -534,6 +493,9 @@ struct i40e_hw { struct i40e_nvm_info nvm; struct i40e_fc_info fc; + /* PBA ID */ + const char *pba_id; + /* pci info */ u16 device_id; u16 vendor_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a97ca2224d..3d8a23d335 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2,6 +2,8 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e.h" +#include "i40e_lan_hmc.h" +#include "i40e_virtchnl_pf.h" /*********************notification routines***********************/ @@ -2605,6 +2607,14 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; int i; + if (vf->is_disabled_from_host) { + aq_ret = -EPERM; + dev_info(&pf->pdev->dev, + "Admin has disabled VF %d, will not enable queues\n", + vf->vf_id); + goto error_param; + } + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = -EINVAL; goto error_param; @@ -2840,6 +2850,24 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/** + * i40e_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool i40e_can_vf_change_mac(struct i40e_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !vf->trusted) + return false; + + return true; +} + #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) @@ -2899,8 +2927,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - !is_multicast_ether_addr(addr) && vf->pf_set_mac && + if (!i40e_can_vf_change_mac(vf) && + !is_multicast_ether_addr(addr) && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); @@ -3106,19 +3134,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = -EINVAL; goto error_param; } - if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) - was_unimac_deleted = true; } vsi = pf->vsi[vf->lan_vsi_idx]; spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ - for (i = 0; i < al->num_elements; i++) + for (i = 0; i < al->num_elements; i++) { + const u8 *addr = al->list[i].addr; + + /* Allow to delete VF primary MAC only if it was not set + * administratively by PF or if VF is trusted. 
+ */ + if (ether_addr_equal(addr, vf->default_lan_addr.addr) && + i40e_can_vf_change_mac(vf)) + was_unimac_deleted = true; + else + continue; + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } + } spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -4732,9 +4770,12 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) struct i40e_link_status *ls = &pf->hw.phy.link_info; struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; + struct i40e_vsi *vsi; + unsigned long q_map; struct i40e_vf *vf; int abs_vf_id; int ret = 0; + int tmp; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); @@ -4757,17 +4798,38 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) switch (link) { case IFLA_VF_LINK_STATE_AUTO: vf->link_forced = false; + vf->is_disabled_from_host = false; + /* reset needed to reinit VF resources */ + i40e_vc_reset_vf(vf, true); i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_ENABLE: vf->link_forced = true; vf->link_up = true; + vf->is_disabled_from_host = false; + /* reset needed to reinit VF resources */ + i40e_vc_reset_vf(vf, true); i40e_set_vf_link_state(vf, &pfe, ls); break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; vf->link_up = false; i40e_set_vf_link_state(vf, &pfe, ls); + + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = BIT(vsi->num_queue_pairs) - 1; + + vf->is_disabled_from_host = true; + + /* Try to stop both Tx&Rx rings even if one of the calls fails + * to ensure we stop the rings even in case of errors. + * If any of them returns with an error then the first + * error that occurred will be returned. + */ + tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false); + ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false); + + ret = tmp ? tmp : ret; break; default: ret = -EINVAL; @@ -4944,7 +5006,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->tx_bytes = stats->tx_bytes; vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; - vf_stats->rx_dropped = stats->rx_discards; + vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; vf_stats->tx_dropped = stats->tx_discards; return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index bda9ba668c..66f95e2f31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -4,7 +4,9 @@ #ifndef _I40E_VIRTCHNL_PF_H_ #define _I40E_VIRTCHNL_PF_H_ -#include "i40e.h" +#include +#include +#include "i40e_type.h" #define I40E_MAX_VLANID 4095 @@ -98,6 +100,7 @@ struct i40e_vf { bool link_forced; bool link_up; /* only valid if VF link is forced */ bool spoofchk; + bool is_disabled_from_host; /* PF ctrl of VF enable/disable */ u16 num_vlan; /* ADq related variables */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 1f8ae6f5d9..65f38a57b3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -2,11 +2,7 @@ /* Copyright(c) 2018 Intel Corporation. 
*/ #include -#include #include -#include - -#include "i40e.h" #include "i40e_txrx_common.h" #include "i40e_xsk.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 821df248f8..ef156fad52 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -4,6 +4,8 @@ #ifndef _I40E_XSK_H_ #define _I40E_XSK_H_ +#include + /* This value should match the pragma in the loop_unrolled_for * macro. Why 4? It is strictly empirical. It seems to be a good * compromise between the advantage of having simultaneous outstanding @@ -20,7 +22,9 @@ #define loop_unrolled_for for #endif +struct i40e_ring; struct i40e_vsi; +struct net_device; struct xsk_buff_pool; int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 9c3e45c54d..2d154a4e2f 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -13,4 +13,4 @@ obj-$(CONFIG_IAVF) += iavf.o iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ iavf_adv_rss.o \ - iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o + iavf_txrx.o iavf_common.o iavf_adminq.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 431d9d62c8..63b45c61cc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -63,7 +63,6 @@ struct iavf_vsi { DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); int base_vector; u16 qs_handle; - void *priv; /* client driver data reference. */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -256,7 +255,6 @@ struct iavf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct work_struct finish_config; - struct delayed_work client_task; wait_queue_head_t down_waitqueue; wait_queue_head_t reset_waitqueue; wait_queue_head_t vc_waitqueue; @@ -265,7 +263,6 @@ struct iavf_adapter { int num_vlan_filters; struct list_head mac_filter_list; struct mutex crit_lock; - struct mutex client_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; @@ -282,10 +279,6 @@ struct iavf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; - int num_rdma_msix; - int rdma_base_vector; - u32 client_pending; - struct iavf_client_instance *cinst; struct msix_entry *msix_entries; u32 flags; @@ -294,10 +287,6 @@ struct iavf_adapter { #define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) -#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) -#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) -#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) @@ -378,7 +367,6 @@ struct iavf_adapter { unsigned long crit_section; struct delayed_work watchdog_task; - bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set @@ -390,11 +378,6 @@ struct iavf_adapter { u32 link_speed_mbps; enum virtchnl_ops current_op; -#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? 
\ - (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RDMA : \ - 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. */ #define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -407,6 +390,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_VLAN) #define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN_V2) +#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_CRC) #define VLAN_V2_FILTERING_ALLOWED(_a) \ (VLAN_V2_ALLOWED((_a)) && \ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ @@ -460,12 +445,6 @@ struct iavf_adapter { /* Ethtool Private Flags */ -/* lan device, used by client interface */ -struct iavf_device { - struct list_head list; - struct iavf_adapter *vf; -}; - /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; @@ -569,11 +548,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, int iavf_config_rss(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter); -void iavf_client_subtask(struct iavf_adapter *adapter); -void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len); -void iavf_notify_client_l2_params(struct iavf_vsi *vsi); -void iavf_notify_client_open(struct iavf_vsi *vsi); -void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset); void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c deleted file mode 100644 index e6051b6355..0000000000 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ /dev/null @@ -1,578 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#include -#include - -#include "iavf.h" -#include "iavf_prototype.h" -#include "iavf_client.h" - -static -const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; -static struct iavf_client *vf_registered_client; -static LIST_HEAD(iavf_devices); -static DEFINE_MUTEX(iavf_device_mutex); - -static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len); - -static int iavf_client_setup_qvlist(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_qvlist_info *qvlist_info); - -static struct iavf_ops iavf_lan_ops = { - .virtchnl_send = iavf_client_virtchnl_send, - .setup_qvlist = iavf_client_setup_qvlist, -}; - -/** - * iavf_client_get_params - retrieve relevant client parameters - * @vsi: VSI with parameters - * @params: client param struct - **/ -static -void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params) -{ - int i; - - memset(params, 0, sizeof(struct iavf_params)); - params->mtu = vsi->netdev->mtu; - params->link_up = vsi->back->link_up; - - for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) { - params->qos.prio_qos[i].tc = 0; - params->qos.prio_qos[i].qs_handle = vsi->qs_handle; - } -} - -/** - * iavf_notify_client_message - call the client message receive callback - * @vsi: the VSI associated with this client - * @msg: message buffer - * @len: length of message - * - * If there is a client to this VSI, call the client - **/ -void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) -{ - struct iavf_client_instance *cinst; - - if (!vsi) - return; - - cinst = vsi->back->cinst; - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->virtchnl_receive) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance virtchnl_receive function\n"); - return; - } - cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client, - msg, len); -} - -/** - * iavf_notify_client_l2_params - call the client notify callback - * @vsi: the VSI with l2 param changes - * - * If there is a client to this VSI, call the client - **/ -void iavf_notify_client_l2_params(struct iavf_vsi *vsi) -{ - struct iavf_client_instance *cinst; - struct iavf_params params; - - if (!vsi) - return; - - cinst = vsi->back->cinst; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->l2_param_change) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance l2_param_change function\n"); - return; - } - iavf_client_get_params(vsi, &params); - cinst->lan_info.params = params; - cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, - &params); -} - -/** - * iavf_notify_client_open - call the client open callback - * @vsi: the VSI with netdev opened - * - * If there is a client to this netdev, call the client with open - **/ -void iavf_notify_client_open(struct iavf_vsi *vsi) -{ - struct iavf_adapter *adapter = vsi->back; - struct iavf_client_instance *cinst = adapter->cinst; - int ret; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->open) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance open function\n"); - return; - } - if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) { - ret = cinst->client->ops->open(&cinst->lan_info, cinst->client); - if (!ret) - set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); - } -} - -/** - * iavf_client_release_qvlist - send a message to the PF to release rdma qv map - * @ldev: pointer to L2 context.
- * - * Return 0 on success or < 0 on error - **/ -static int iavf_client_release_qvlist(struct iavf_info *ldev) -{ - struct iavf_adapter *adapter = ldev->vf; - enum iavf_status err; - - if (adapter->aq_required) - return -EAGAIN; - - err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, - IAVF_SUCCESS, NULL, 0, NULL); - - if (err) - dev_err(&adapter->pdev->dev, - "Unable to send RDMA vector release message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - - return err; -} - -/** - * iavf_notify_client_close - call the client close callback - * @vsi: the VSI with netdev closed - * @reset: true when close called due to reset pending - * - * If there is a client to this netdev, call the client with close - **/ -void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) -{ - struct iavf_adapter *adapter = vsi->back; - struct iavf_client_instance *cinst = adapter->cinst; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->close) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance close function\n"); - return; - } - cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); - iavf_client_release_qvlist(&cinst->lan_info); - clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); -} - -/** - * iavf_client_add_instance - add a client instance to the instance list - * @adapter: pointer to the board struct - * - * Returns cinst ptr on success, NULL on failure - **/ -static struct iavf_client_instance * -iavf_client_add_instance(struct iavf_adapter *adapter) -{ - struct iavf_client_instance *cinst = NULL; - struct iavf_vsi *vsi = &adapter->vsi; - struct netdev_hw_addr *mac = NULL; - struct iavf_params params; - - if (!vf_registered_client) - goto out; - - if (adapter->cinst) { - cinst = adapter->cinst; - goto out; - } - - cinst = kzalloc(sizeof(*cinst), GFP_KERNEL); - if (!cinst) - goto out; - - cinst->lan_info.vf = (void *)adapter; - cinst->lan_info.netdev = vsi->netdev; - cinst->lan_info.pcidev = adapter->pdev; - cinst->lan_info.fid = 0; - cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF; - cinst->lan_info.hw_addr = adapter->hw.hw_addr; - cinst->lan_info.ops = &iavf_lan_ops; - cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; - cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR; - cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; - iavf_client_get_params(vsi, &params); - cinst->lan_info.params = params; - set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); - - cinst->lan_info.msix_count = adapter->num_rdma_msix; - cinst->lan_info.msix_entries = - &adapter->msix_entries[adapter->rdma_base_vector]; - - mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, - struct netdev_hw_addr, list); - if (mac) - ether_addr_copy(cinst->lan_info.lanmac, mac->addr); - else - dev_err(&adapter->pdev->dev, "MAC address list is empty!\n"); - - cinst->client = vf_registered_client; - adapter->cinst = cinst; -out: - return cinst; -} - -/** - * iavf_client_del_instance - removes a client instance from the list - * @adapter: pointer to the board struct - * - **/ -static -void iavf_client_del_instance(struct iavf_adapter *adapter) -{ - kfree(adapter->cinst); - adapter->cinst = NULL; -} - -/** - * iavf_client_subtask - client maintenance work - * @adapter: board private structure - **/ -void iavf_client_subtask(struct iavf_adapter *adapter) -{ - struct iavf_client *client = vf_registered_client; - struct iavf_client_instance *cinst; - int ret = 0; - - if (adapter->state < __IAVF_DOWN) -
return; - - /* first check client is registered */ - if (!client) - return; - - /* Add the client instance to the instance list */ - cinst = iavf_client_add_instance(adapter); - if (!cinst) - return; - - dev_info(&adapter->pdev->dev, "Added instance of Client %s\n", - client->name); - - if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { - /* Send an Open request to the client */ - - if (client->ops && client->ops->open) - ret = client->ops->open(&cinst->lan_info, client); - if (!ret) - set_bit(__IAVF_CLIENT_INSTANCE_OPENED, - &cinst->state); - else - /* remove client instance */ - iavf_client_del_instance(adapter); - } -} - -/** - * iavf_lan_add_device - add a lan device struct to the list of lan devices - * @adapter: pointer to the board struct - * - * Returns 0 on success or none 0 on error - **/ -int iavf_lan_add_device(struct iavf_adapter *adapter) -{ - struct iavf_device *ldev; - int ret = 0; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - if (ldev->vf == adapter) { - ret = -EEXIST; - goto out; - } - } - ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); - if (!ldev) { - ret = -ENOMEM; - goto out; - } - ldev->vf = adapter; - INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &iavf_devices); - dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - - /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients. - */ - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - -out: - mutex_unlock(&iavf_device_mutex); - return ret; -} - -/** - * iavf_lan_del_device - removes a lan device from the device list - * @adapter: pointer to the board struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_lan_del_device(struct iavf_adapter *adapter) -{ - struct iavf_device *ldev, *tmp; - int ret = -ENODEV; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) { - if (ldev->vf == adapter) { - dev_info(&adapter->pdev->dev, - "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - list_del(&ldev->list); - kfree(ldev); - ret = 0; - break; - } - } - - mutex_unlock(&iavf_device_mutex); - return ret; -} - -/** - * iavf_client_release - release client specific resources - * @client: pointer to the registered client - * - **/ -static void iavf_client_release(struct iavf_client *client) -{ - struct iavf_client_instance *cinst; - struct iavf_device *ldev; - struct iavf_adapter *adapter; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - adapter = ldev->vf; - cinst = adapter->cinst; - if (!cinst) - continue; - if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { - if (client->ops && client->ops->close) - client->ops->close(&cinst->lan_info, client, - false); - iavf_client_release_qvlist(&cinst->lan_info); - clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); - - dev_warn(&adapter->pdev->dev, - "Client %s instance closed\n", client->name); - } - /* delete the client instance */ - iavf_client_del_instance(adapter); - dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", - client->name); - } - mutex_unlock(&iavf_device_mutex); -} - -/** - * iavf_client_prepare - prepare client specific resources - * @client: pointer to the registered client - * - **/ -static void 
iavf_client_prepare(struct iavf_client *client) -{ - struct iavf_device *ldev; - struct iavf_adapter *adapter; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - adapter = ldev->vf; - /* Signal the watchdog to service the client */ - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - } - mutex_unlock(&iavf_device_mutex); -} - -/** - * iavf_client_virtchnl_send - send a message to the PF instance - * @ldev: pointer to L2 context. - * @client: Client pointer. - * @msg: pointer to message buffer - * @len: message length - * - * Return 0 on success or < 0 on error - **/ -static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len) -{ - struct iavf_adapter *adapter = ldev->vf; - enum iavf_status err; - - if (adapter->aq_required) - return -EAGAIN; - - err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA, - IAVF_SUCCESS, msg, len, NULL); - if (err) - dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - - return err; -} - -/** - * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map - * @ldev: pointer to L2 context. - * @client: Client pointer. - * @qvlist_info: queue and vector list - * - * Return 0 on success or < 0 on error - **/ -static int iavf_client_setup_qvlist(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_qvlist_info *qvlist_info) -{ - struct virtchnl_rdma_qvlist_info *v_qvlist_info; - struct iavf_adapter *adapter = ldev->vf; - struct iavf_qv_info *qv_info; - enum iavf_status err; - u32 v_idx, i; - size_t msg_size; - - if (adapter->aq_required) - return -EAGAIN; - - /* A quick check on whether the vectors belong to the client */ - for (i = 0; i < qvlist_info->num_vectors; i++) { - qv_info = &qvlist_info->qv_info[i]; - if (!qv_info) - continue; - v_idx = qv_info->v_idx; - if ((v_idx >= - (adapter->rdma_base_vector + adapter->num_rdma_msix)) || - (v_idx < adapter->rdma_base_vector)) - return -EINVAL; - } - - v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info; - msg_size = virtchnl_struct_size(v_qvlist_info, qv_info, - v_qvlist_info->num_vectors); - - adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP); - err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS, - (u8 *)v_qvlist_info, msg_size, NULL); - - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to send RDMA vector config message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - goto out; - } - - err = -EBUSY; - for (i = 0; i < 5; i++) { - msleep(100); - if (!(adapter->client_pending & - BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) { - err = 0; - break; - } - } -out: - return err; -} - -/** - * iavf_register_client - Register a iavf client driver with the L2 driver - * @client: pointer to the iavf_client struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_register_client(struct iavf_client *client) -{ - int ret = 0; - - if (!client) { - ret = -EIO; - goto out; - } - - if (strlen(client->name) == 0) { - pr_info("iavf: Failed to register client with no name\n"); - ret = -EIO; - goto out; - } - - if (vf_registered_client) { - pr_info("iavf: Client %s has already been registered!\n", - client->name); - ret = -EEXIST; - goto out; - } - - if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) || - (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) { - pr_info("iavf: Failed to register client %s due to mismatched 
client interface version\n", - client->name); - pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", - client->version.major, client->version.minor, - client->version.build, - iavf_client_interface_version_str); - ret = -EIO; - goto out; - } - - vf_registered_client = client; - - iavf_client_prepare(client); - - pr_info("iavf: Registered client %s with return code %d\n", - client->name, ret); -out: - return ret; -} -EXPORT_SYMBOL(iavf_register_client); - -/** - * iavf_unregister_client - Unregister a iavf client driver with the L2 driver - * @client: pointer to the iavf_client struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_unregister_client(struct iavf_client *client) -{ - int ret = 0; - - /* When a unregister request comes through we would have to send - * a close for each of the client instances that were opened. - * client_release function is called to handle this. - */ - iavf_client_release(client); - - if (vf_registered_client != client) { - pr_info("iavf: Client %s has not been registered\n", - client->name); - ret = -ENODEV; - goto out; - } - vf_registered_client = NULL; - pr_info("iavf: Unregistered client %s\n", client->name); -out: - return ret; -} -EXPORT_SYMBOL(iavf_unregister_client); diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h deleted file mode 100644 index 500269bc0f..0000000000 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ /dev/null @@ -1,169 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _IAVF_CLIENT_H_ -#define _IAVF_CLIENT_H_ - -#define IAVF_CLIENT_STR_LENGTH 10 - -/* Client interface version should be updated anytime there is a change in the - * existing APIs or data structures. - */ -#define IAVF_CLIENT_VERSION_MAJOR 0 -#define IAVF_CLIENT_VERSION_MINOR 01 -#define IAVF_CLIENT_VERSION_BUILD 00 -#define IAVF_CLIENT_VERSION_STR \ - __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \ - __stringify(IAVF_CLIENT_VERSION_MINOR) "." \ - __stringify(IAVF_CLIENT_VERSION_BUILD) - -struct iavf_client_version { - u8 major; - u8 minor; - u8 build; - u8 rsvd; -}; - -enum iavf_client_state { - __IAVF_CLIENT_NULL, - __IAVF_CLIENT_REGISTERED -}; - -enum iavf_client_instance_state { - __IAVF_CLIENT_INSTANCE_NONE, - __IAVF_CLIENT_INSTANCE_OPENED, -}; - -struct iavf_ops; -struct iavf_client; - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. 
- */ -#define IAVF_QUEUE_TYPE_PE_AEQ 0x80 -#define IAVF_QUEUE_INVALID_IDX 0xFFFF - -struct iavf_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct iavf_qvlist_info { - u32 num_vectors; - struct iavf_qv_info qv_info[]; -}; - -#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF - -/* set of LAN parameters useful for clients managed by LAN */ - -/* Struct to hold per priority info */ -struct iavf_prio_qos_params { - u16 qs_handle; /* qs handle for prio */ - u8 tc; /* TC mapped to prio */ - u8 reserved; -}; - -#define IAVF_CLIENT_MAX_USER_PRIORITY 8 -/* Struct to hold Client QoS */ -struct iavf_qos_params { - struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY]; -}; - -struct iavf_params { - struct iavf_qos_params qos; - u16 mtu; - u16 link_up; /* boolean */ -}; - -/* Structure to hold LAN device info for a client device */ -struct iavf_info { - struct iavf_client_version version; - u8 lanmac[6]; - struct net_device *netdev; - struct pci_dev *pcidev; - u8 __iomem *hw_addr; - u8 fid; /* function id, PF id or VF id */ -#define IAVF_CLIENT_FTYPE_PF 0 -#define IAVF_CLIENT_FTYPE_VF 1 - u8 ftype; /* function type, PF or VF */ - void *vf; /* cast to iavf_adapter */ - - /* All L2 params that could change during the life span of the device - * and needs to be communicated to the client when they change - */ - struct iavf_params params; - struct iavf_ops *ops; - - u16 msix_count; /* number of msix vectors*/ - /* Array down below will be dynamically allocated based on msix_count */ - struct msix_entry *msix_entries; - u16 itr_index; /* Which ITR index the PE driver is suppose to use */ -}; - -struct iavf_ops { - /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client, - struct iavf_qvlist_info *qv_info); - - u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client, - u8 *msg, u16 len); - - /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/ - void (*request_reset)(struct iavf_info *ldev, - struct iavf_client *client); -}; - -struct iavf_client_ops { - /* Should be called from register_client() or whenever the driver is - * ready to create a specific client instance. - */ - int (*open)(struct iavf_info *ldev, struct iavf_client *client); - - /* Should be closed when netdev is unavailable or when unregister - * call comes in. If the close happens due to a reset, set the reset - * bit to true. 
- */ - void (*close)(struct iavf_info *ldev, struct iavf_client *client, - bool reset); - - /* called when some l2 managed parameters changes - mss */ - void (*l2_param_change)(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_params *params); - - /* called when a message is received from the PF */ - int (*virtchnl_receive)(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len); -}; - -/* Client device */ -struct iavf_client_instance { - struct list_head list; - struct iavf_info lan_info; - struct iavf_client *client; - unsigned long state; -}; - -struct iavf_client { - struct list_head list; /* list of registered clients */ - char name[IAVF_CLIENT_STR_LENGTH]; - struct iavf_client_version version; - unsigned long state; /* client state */ - atomic_t ref_cnt; /* Count of all the client devices of this kind */ - u32 flags; -#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) - u8 type; -#define IAVF_CLIENT_RDMA 0 - struct iavf_client_ops *ops; /* client ops provided by the client */ -}; - -/* used by clients */ -int iavf_register_client(struct iavf_client *client); -int iavf_unregister_client(struct iavf_client *client); -#endif /* _IAVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 1afd761d80..6a10c0ecf2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -1,42 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include +#include #include "iavf_type.h" #include "iavf_adminq.h" #include "iavf_prototype.h" -#include - -/** - * iavf_set_mac_type - Sets MAC type - * @hw: pointer to the HW structure - * - * This function sets the mac type of the adapter based on the - * vendor ID and device ID stored in the hw structure. - **/ -enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) -{ - enum iavf_status status = 0; - - if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { - switch (hw->device_id) { - case IAVF_DEV_ID_X722_VF: - hw->mac.type = IAVF_MAC_X722_VF; - break; - case IAVF_DEV_ID_VF: - case IAVF_DEV_ID_VF_HV: - case IAVF_DEV_ID_ADAPTIVE_VF: - hw->mac.type = IAVF_MAC_VF; - break; - default: - hw->mac.type = IAVF_MAC_GENERIC; - break; - } - } else { - status = IAVF_ERR_DEVICE_NOT_SUPPORTED; - } - - return status; -} /** * iavf_aq_str - convert AQ err code to a string diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 892c6a4f03..25ba5653ac 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ +#include +#include + /* ethtool support for iavf */ #include "iavf.h" -#include - /* ethtool statistics helpers */ /** @@ -395,11 +396,9 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) { unsigned int i; - for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - iavf_gstrings_priv_flags[i].flag_string); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&data, "%s", + iavf_gstrings_priv_flags[i].flag_string); } /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 03e774bd2a..65ddcd81c9 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -3,6 +3,7 @@ /* flow director ethtool support for iavf */ +#include #include "iavf.h" #define GTPU_PORT 2152 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 257865647c..e8d5b889ad 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3,7 +3,6 @@ #include "iavf.h" #include "iavf_prototype.h" -#include "iavf_client.h" /* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -1255,7 +1254,7 @@ static void iavf_configure(struct iavf_adapter *adapter) * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding crit_lock. **/ static void iavf_up_complete(struct iavf_adapter *adapter) { @@ -1265,8 +1264,6 @@ static void iavf_up_complete(struct iavf_adapter *adapter) iavf_napi_enable_all(adapter); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; - if (CLIENT_ENABLED(adapter)) - adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } @@ -1381,7 +1378,7 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) * iavf_down - Shutdown the connection processing * @adapter: board private structure * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding crit_lock. **/ void iavf_down(struct iavf_adapter *adapter) { @@ -1401,8 +1398,10 @@ void iavf_down(struct iavf_adapter *adapter) iavf_clear_fdir_filters(adapter); iavf_clear_adv_rss_conf(adapter); - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) && - !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) { + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) + return; + + if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. 
Don't wait @@ -1933,6 +1932,17 @@ err_alloc_queues: return err; } +/** + * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does + * @adapter: board private structure + **/ +static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter) +{ + iavf_free_q_vectors(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); +} + /** * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure @@ -1961,11 +1971,9 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool runni if (running) iavf_free_traffic_irqs(adapter); iavf_free_misc_irq(adapter); - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); - iavf_free_queues(adapter); + iavf_free_interrupt_scheme(adapter); - err = iavf_init_interrupt_scheme(adapter); + err = iavf_init_interrupt_scheme(adapter); if (err) goto err; @@ -2000,7 +2008,7 @@ static void iavf_finish_config(struct work_struct *work) mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && - adapter->netdev_registered && + adapter->netdev->reg_state == NETREG_REGISTERED && !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { netdev_update_features(adapter->netdev); adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; @@ -2008,7 +2016,7 @@ static void iavf_finish_config(struct work_struct *work) switch (adapter->state) { case __IAVF_DOWN: - if (!adapter->netdev_registered) { + if (adapter->netdev->reg_state != NETREG_REGISTERED) { err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", @@ -2022,7 +2030,6 @@ static void iavf_finish_config(struct work_struct *work) __IAVF_INIT_CONFIG_ADAPTER); goto out; } - adapter->netdev_registered = true; } /* Set the real number of queues when reset occurs while @@ -2337,11 +2344,6 @@ static void iavf_startup(struct iavf_adapter *adapter) /* driver loaded, probe complete */ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - status = iavf_set_mac_type(hw); - if (status) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); - goto err; - } ret = iavf_check_reset_complete(hw); if (ret) { @@ -2682,12 +2684,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) adapter->link_up = false; netif_tx_stop_all_queues(netdev); - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_add_device(adapter); - if (err) - dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", - err); - } dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); @@ -2884,7 +2880,6 @@ static void iavf_watchdog_task(struct work_struct *work) return; } - schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->state >= __IAVF_DOWN) @@ -2953,9 +2948,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->cloud_filter_list_lock); iavf_free_misc_irq(adapter); - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); - iavf_free_queues(adapter); + iavf_free_interrupt_scheme(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; @@ -2997,16 +2990,6 @@ static void iavf_reset_task(struct work_struct *work) return; } - while (!mutex_trylock(&adapter->client_lock)) - usleep_range(500, 1000); - 
if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | - IAVF_FLAG_CLIENT_NEEDS_CLOSE | - IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - IAVF_FLAG_SERVICE_CLIENT_REQUESTED); - cancel_delayed_work_sync(&adapter->client_task); - iavf_notify_client_close(&adapter->vsi, true); - } iavf_misc_irq_disable(adapter); if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; @@ -3050,7 +3033,6 @@ static void iavf_reset_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -3189,7 +3171,6 @@ continue_reset: adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; wake_up(&adapter->reset_waitqueue); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; @@ -3200,7 +3181,6 @@ reset_err: } iavf_disable_vf(adapter); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3299,48 +3279,6 @@ out: iavf_misc_irq_enable(adapter); } -/** - * iavf_client_task - worker thread to perform client work - * @work: pointer to work_struct containing our data - * - * This task handles client interactions. Because client calls can be - * reentrant, we can't handle them in the watchdog. - **/ -static void iavf_client_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = - container_of(work, struct iavf_adapter, client_task.work); - - /* If we can't get the client bit, just give up. We'll be rescheduled - * later. - */ - - if (!mutex_trylock(&adapter->client_lock)) - return; - - if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { - iavf_client_subtask(adapter); - adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - iavf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { - iavf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { - iavf_notify_client_open(&adapter->vsi); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; - } -out: - mutex_unlock(&adapter->client_lock); -} - /** * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure @@ -4301,8 +4239,6 @@ static int iavf_close(struct net_device *netdev) } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - if (CLIENT_ENABLED(adapter)) - adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl * deadlock with adminq_task() until iavf_close timeouts. 
We must send @@ -4371,10 +4307,6 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; - if (CLIENT_ENABLED(adapter)) { - iavf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - } if (netif_running(netdev)) { iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); @@ -4452,6 +4384,9 @@ static int iavf_set_features(struct net_device *netdev, (features & NETIF_VLAN_OFFLOAD_FEATURES)) iavf_set_vlan_offload_features(adapter, netdev->features, features); + if (CRC_OFFLOAD_ALLOWED(adapter) && + ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS))) + iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) { if (features & NETIF_F_NTUPLE) @@ -4580,6 +4515,9 @@ iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) } } + if (CRC_OFFLOAD_ALLOWED(adapter)) + hw_features |= NETIF_F_RXFCS; + return hw_features; } @@ -4743,6 +4681,55 @@ iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, return requested_features; } +/** + * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features + * @adapter: board private structure + * @requested_features: stack requested NETDEV features + * + * Returns fixed-up features bits + **/ +static netdev_features_t +iavf_fix_strip_features(struct iavf_adapter *adapter, + netdev_features_t requested_features) +{ + struct net_device *netdev = adapter->netdev; + bool crc_offload_req, is_vlan_strip; + netdev_features_t vlan_strip; + int num_non_zero_vlan; + + crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) && + (requested_features & NETIF_F_RXFCS); + num_non_zero_vlan = iavf_get_num_vlans_added(adapter); + vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); + is_vlan_strip = requested_features & vlan_strip; + + if (!crc_offload_req) + return requested_features; + + if (!num_non_zero_vlan && (netdev->features & vlan_strip) && + !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) { + requested_features &= ~vlan_strip; + netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + return requested_features; + } + + if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) { + requested_features &= ~vlan_strip; + if (!(netdev->features & vlan_strip)) + netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping"); + + return requested_features; + } + + if (num_non_zero_vlan && is_vlan_strip && + !(netdev->features & NETIF_F_RXFCS)) { + requested_features &= ~NETIF_F_RXFCS; + netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping"); + } + + return requested_features; +} + /** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device @@ -4755,10 +4742,12 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); + features = iavf_fix_netdev_vlan_features(adapter, features); + if (!FDIR_FLTR_SUPPORT(adapter)) features &= ~NETIF_F_NTUPLE; - return iavf_fix_netdev_vlan_features(adapter, features); + return iavf_fix_strip_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { @@ -4795,7 +4784,7 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; - usleep_range(10, 20); + 
msleep(IAVF_RESET_WAIT_MS); } return -EBUSY; } @@ -4992,7 +4981,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and destroy them only once in remove */ mutex_init(&adapter->crit_lock); - mutex_init(&adapter->client_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -5012,7 +5000,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_WORK(&adapter->finish_config, iavf_finish_config); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); @@ -5053,8 +5040,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) netif_device_detach(netdev); - while (!mutex_trylock(&adapter->crit_lock)) - usleep_range(500, 1000); + mutex_lock(&adapter->crit_lock); if (netif_running(netdev)) { rtnl_lock(); @@ -5125,7 +5111,6 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_adapter *adapter; struct net_device *netdev; struct iavf_hw *hw; - int err; /* Don't proceed with remove if netdev is already freed */ netdev = pci_get_drvdata(pdev); @@ -5161,19 +5146,8 @@ static void iavf_remove(struct pci_dev *pdev) cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->finish_config); - rtnl_lock(); - if (adapter->netdev_registered) { - unregister_netdevice(netdev); - adapter->netdev_registered = false; - } - rtnl_unlock(); - - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_del_device(adapter); - if (err) - dev_warn(&pdev->dev, "Failed to delete client device: %d\n", - err); - } + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); @@ -5192,7 +5166,6 @@ static void iavf_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->adminq_task); - cancel_delayed_work_sync(&adapter->client_task); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; @@ -5200,9 +5173,7 @@ static void iavf_remove(struct pci_dev *pdev) iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_free_misc_irq(adapter); - - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); + iavf_free_interrupt_scheme(adapter); iavf_free_rss(adapter); @@ -5212,13 +5183,11 @@ static void iavf_remove(struct pci_dev *pdev) /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); - mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); - iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index 940cb4203f..4a48e61714 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -45,8 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, struct iavf_aqc_get_set_rss_key_data *key); -enum iavf_status iavf_set_mac_type(struct iavf_hw *hw); - 
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 8c5f6096b0..fb7edba9c2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1,14 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include #include #include "iavf.h" #include "iavf_trace.h" #include "iavf_prototype.h" -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) +static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) { return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | @@ -370,8 +371,8 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, q_vector->arm_wb_state = true; } -static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, - struct iavf_ring_container *rc) +static bool iavf_container_is_rx(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { return &q_vector->rx == rc; } @@ -806,7 +807,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) +static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -828,7 +829,7 @@ static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) * * Returns the offset value for ring into the data buffer. */ -static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) +static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; } @@ -977,9 +978,9 @@ no_buffers: * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ -static inline void iavf_rx_checksum(struct iavf_vsi *vsi, - struct sk_buff *skb, - union iavf_rx_desc *rx_desc) +static void iavf_rx_checksum(struct iavf_vsi *vsi, + struct sk_buff *skb, + union iavf_rx_desc *rx_desc) { struct iavf_rx_ptype_decoded decoded; u32 rx_error, rx_status; @@ -1061,7 +1062,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline int iavf_ptype_to_htype(u8 ptype) +static int iavf_ptype_to_htype(u8 ptype) { struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1085,10 +1086,10 @@ static inline int iavf_ptype_to_htype(u8 ptype) * @skb: skb currently being received and modified * @rx_ptype: Rx packet type **/ -static inline void iavf_rx_hash(struct iavf_ring *ring, - union iavf_rx_desc *rx_desc, - struct sk_buff *skb, - u8 rx_ptype) +static void iavf_rx_hash(struct iavf_ring *ring, + union iavf_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) { u32 hash; const __le64 rss_mask = @@ -1115,10 +1116,10 @@ static inline void iavf_rx_hash(struct iavf_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
**/ -static inline -void iavf_process_skb_fields(struct iavf_ring *rx_ring, - union iavf_rx_desc *rx_desc, struct sk_buff *skb, - u8 rx_ptype) +static void +iavf_process_skb_fields(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); @@ -1662,8 +1663,8 @@ static inline u32 iavf_buildreg_itr(const int type, u16 itr) * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ -static inline void iavf_update_enable_itr(struct iavf_vsi *vsi, - struct iavf_q_vector *q_vector) +static void iavf_update_enable_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { struct iavf_hw *hw = &vsi->back->hw; u32 intval; @@ -2275,9 +2276,9 @@ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, - struct iavf_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, + struct iavf_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 9f1f523807..2b6a207fa4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -69,15 +69,6 @@ enum iavf_debug_mask { * the Firmware and AdminQ are intended to insulate the driver from most of the * future changes, but these structures will also do part of the job. */ -enum iavf_mac_type { - IAVF_MAC_UNKNOWN = 0, - IAVF_MAC_XL710, - IAVF_MAC_VF, - IAVF_MAC_X722, - IAVF_MAC_X722_VF, - IAVF_MAC_GENERIC, -}; - enum iavf_vsi_type { IAVF_VSI_MAIN = 0, IAVF_VSI_VMDQ1 = 1, @@ -110,11 +101,8 @@ struct iavf_hw_capabilities { }; struct iavf_mac_info { - enum iavf_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; }; /* PCI bus types */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index b95a4f9032..2d9366be0e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -3,7 +3,6 @@ #include "iavf.h" #include "iavf_prototype.h" -#include "iavf_client.h" /** * iavf_send_pf_msg @@ -142,6 +141,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | + VIRTCHNL_VF_OFFLOAD_CRC | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | @@ -312,6 +312,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); + if (CRC_OFFLOAD_ALLOWED(adapter)) + vqpi->rxq.crc_disable = !!(adapter->netdev->features & + NETIF_F_RXFCS); vqpi++; } @@ -1374,8 +1377,6 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); } -#define IAVF_MAX_SPEED_STRLEN 13 - /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -1393,10 +1394,6 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) return; } - speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); - if 
(!speed) - return; - if (ADV_LINK_SUPPORT(adapter)) { link_speed_mbps = adapter->link_speed_mbps; goto print_link_msg; @@ -1434,17 +1431,17 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) print_link_msg: if (link_speed_mbps > SPEED_1000) { - if (link_speed_mbps == SPEED_2500) - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); - else + if (link_speed_mbps == SPEED_2500) { + speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps"); + } else { /* convert to Gbps inline */ - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", - link_speed_mbps / 1000, "Gbps"); + speed = kasprintf(GFP_KERNEL, "%d Gbps", + link_speed_mbps / 1000); + } } else if (link_speed_mbps == SPEED_UNKNOWN) { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps"); } else { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", - link_speed_mbps, "Mbps"); + speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps); } netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); @@ -2361,19 +2358,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; - case VIRTCHNL_OP_RDMA: - /* Gobble zero-length replies from the PF. They indicate that - * a previous message was received OK, and the client doesn't - * care about that. - */ - if (msglen && CLIENT_ENABLED(adapter)) - iavf_notify_client_message(&adapter->vsi, msg, msglen); - break; - - case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: - adapter->client_pending &= - ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)); - break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 960277d78e..0679907980 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -43,7 +43,7 @@ ice-$(CONFIG_PCI_IOV) += \ ice_vf_mbx.o \ ice_vf_vsi_vlan_ops.o \ ice_vf_lib.o -ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o +ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 5022b036ca..351e0d36df 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -76,6 +76,7 @@ #include "ice_vsi_vlan_ops.h" #include "ice_gnss.h" #include "ice_irq.h" +#include "ice_dpll.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -195,10 +196,13 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) + enum ice_feature { ICE_F_DSCP, - ICE_F_PTP_EXTTS, + ICE_F_PHY_RCLK, ICE_F_SMA_CTRL, + ICE_F_CGU, ICE_F_GNSS, ICE_F_ROCE_LAG, ICE_F_SRIOV_LAG, @@ -508,6 +512,7 @@ enum ice_pf_flags { ICE_FLAG_UNPLUG_AUX_DEV, ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ + ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -549,6 +554,8 @@ struct ice_pf { * MSIX vectors allowed on this PF. 
*/ u16 sriov_base_vector; + unsigned long *sriov_irq_bm; /* bitmap to track irq usage */ + u16 sriov_irq_size; /* size of the irq_bm bitmap */ u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ @@ -640,6 +647,7 @@ struct ice_pf { #define ICE_VF_AGG_NODE_ID_START 65 #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; + struct ice_dplls dplls; }; extern struct workqueue_struct *ice_lag_wq; @@ -667,6 +675,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) return !!qv->ch; /* Enable it to run with TC */ } +/** + * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt + * @pf: Board private structure + * + * Return true if this PF should respond to the Tx timestamp interrupt + * indication in the miscellaneous OICR interrupt handler. + */ +static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf) +{ + return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE; +} + /** * ice_irq_dynamic_ena - Enable default interrupt generation settings * @hw: pointer to HW struct @@ -942,6 +962,7 @@ int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); int ice_load(struct ice_pf *pf); void ice_unload(struct ice_pf *pf); +void ice_adv_lnk_speed_maps_init(void); /** * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 29f7a9852a..b8437c36ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -421,10 +421,10 @@ struct ice_aqc_vsi_props { #define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2) #define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3 #define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) -#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) -#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) -#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) -#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH 0x0U +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP 0x1U +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR 0x2U +#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING 0x3U u8 inner_vlan_reserved2[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ @@ -490,11 +490,11 @@ struct ice_aqc_vsi_props { #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) #define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 -#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M GENMASK(7, 6) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ 0x0U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ 0x1U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR 0x2U +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_JHASH 0x3U u8 q_opt_tc; #define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 #define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) @@ -1099,7 +1099,15 @@ struct ice_aqc_get_phy_caps { #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define 
ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) -#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4 +#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5) +#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6) +#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7) +#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8) +#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9) +#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10) +#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11) +#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12) +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 12 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ @@ -1319,11 +1327,42 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_SPEED_40GB BIT(8) #define ICE_AQ_LINK_SPEED_50GB BIT(9) #define ICE_AQ_LINK_SPEED_100GB BIT(10) +#define ICE_AQ_LINK_SPEED_200GB BIT(11) #define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) - __le32 reserved3; /* Aligns next field to 8-byte boundary */ - __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ - __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ -}; + /* Aligns next field to 8-byte boundary */ + __le16 reserved3; + u8 ext_fec_status; + /* RS 272 FEC enabled */ +#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0) + u8 reserved4; + /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_low; + /* Use values from ICE_PHY_TYPE_HIGH_* */ + __le64 phy_type_high; +#define ICE_AQC_LS_DATA_SIZE_V1 \ + offsetofend(struct ice_aqc_get_link_status_data, phy_type_high) + /* Get link status v2 link partner data */ + __le64 lp_phy_type_low; + __le64 lp_phy_type_high; + u8 lp_fec_adv; +#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0) +#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1) +#define ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2) +#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3) +#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4) +#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5) + u8 lp_fec_req; +#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0) +#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1) +#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2) +#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3) + u8 lp_flowcontrol; +#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0) +#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1) + u8 reserved5[5]; +#define ICE_AQC_LS_DATA_SIZE_V2 \ + offsetofend(struct ice_aqc_get_link_status_data, reserved5) +} __packed; /* Set event mask command (direct 0x0613) */ struct ice_aqc_set_event_mask { @@ -1351,6 +1390,30 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; +/* Set PHY recovered clock output (direct 0x0630) */ +struct ice_aqc_set_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd; + __le32 freq; + u8 rsvd2[6]; + __le16 node_handle; +}; + +/* Get PHY recovered clock output (direct 0x0631) */ +struct ice_aqc_get_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd[11]; + __le16 node_handle; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -1367,6 +1430,9 @@ struct ice_aqc_link_topo_params { #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11 #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ (0xF << 
ICE_AQC_LINK_TOPO_NODE_CTX_S) @@ -1403,8 +1469,13 @@ struct ice_aqc_link_topo_addr { struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; -#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 -#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48 u8 rsvd[9]; }; @@ -2125,6 +2196,193 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[]; }; +/* Get CGU abilities command response data structure (indirect 0x0C61) */ +struct ice_aqc_get_cgu_abilities { + u8 num_inputs; + u8 num_outputs; + u8 pps_dpll_idx; + u8 eec_dpll_idx; + __le32 max_in_freq; + __le32 max_in_phase_adj; + __le32 max_out_freq; + __le32 max_out_phase_adj; + u8 cgu_part_num; + u8 rsvd[3]; +}; + +/* Set CGU input config (direct 0x0C62) */ +struct ice_aqc_set_cgu_input_config { + u8 input_idx; + u8 flags1; +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6) +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7) + u8 flags2; +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU input config response descriptor structure (direct 0x0C63) */ +struct ice_aqc_get_cgu_input_config { + u8 input_idx; + u8 status; +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7) + u8 type; +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6) + u8 flags1; +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7) + __le32 freq; + __le32 phase_delay; + u8 flags2; +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU output config (direct 0x0C64) */ +struct ice_aqc_set_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4) + u8 src_sel; +#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU output config (direct 0x0C65) */ +struct ice_aqc_get_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2) + u8 src_sel; +#define 
ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \ + ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT) +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT) + u8 rsvd; + __le32 freq; + __le32 src_freq; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU DPLL status (direct 0x0C66) */ +struct ice_aqc_get_cgu_dpll_status { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4) +#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6) + u8 dpll_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7) + u8 config; +#define ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0) +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT 5 +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT) +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_FREERUN 0 +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \ + ICE_M(0x3, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT) + __le32 phase_offset_h; + __le32 phase_offset_l; + u8 eec_mode; +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU DPLL config (direct 0x0C67) */ +struct ice_aqc_set_cgu_dpll_config { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6) + u8 rsvd; + u8 config; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT 5 +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE \ + ICE_M(0x7, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_FREERUN 0 +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \ + ICE_M(0x3, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT) + u8 rsvd2[8]; + u8 eec_mode; + u8 rsvd3[1]; + __le16 node_handle; +}; + +/* Set CGU reference priority (direct 0x0C68) */ +struct ice_aqc_set_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; + u8 rsvd[11]; + __le16 node_handle; +}; + +/* Get CGU reference priority (direct 0x0C69) */ +struct ice_aqc_get_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; /* Valid only in response */ + u8 rsvd[13]; +}; + +/* Get CGU info (direct 0x0C6A) */ +struct ice_aqc_get_cgu_info { + __le32 cgu_id; + __le32 cgu_cfg_ver; + __le32 cgu_fw_ver; + u8 node_part_num; + u8 dev_rev; + __le16 node_handle; +}; + /* Driver Shared Parameters (direct, 0x0C90) */ struct ice_aqc_driver_shared_params { u8 set_or_get_op; @@ -2139,16 +2397,6 @@ struct ice_aqc_driver_shared_params { __le32 addr_low; }; -enum 
ice_aqc_driver_params { - /* OS clock index for PTP timer Domain 0 */ - ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0, - /* OS clock index for PTP timer Domain 1 */ - ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1, - - /* Add new parameters above */ - ICE_AQC_DRIVER_PARAM_MAX = 16, -}; - /* Lan Queue Overflow Event (direct, 0x1001) */ struct ice_aqc_event_lan_overflow { __le32 prtdcb_ruptq; @@ -2194,6 +2442,8 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; + struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; @@ -2234,6 +2484,15 @@ struct ice_aq_desc { struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_download_pkg download_pkg; + struct ice_aqc_set_cgu_input_config set_cgu_input_config; + struct ice_aqc_get_cgu_input_config get_cgu_input_config; + struct ice_aqc_set_cgu_output_config set_cgu_output_config; + struct ice_aqc_get_cgu_output_config get_cgu_output_config; + struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status; + struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config; + struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio; + struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; + struct ice_aqc_get_cgu_info get_cgu_info; struct ice_aqc_driver_shared_params drv_shared_params; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; @@ -2358,6 +2617,8 @@ enum ice_adminq_opc { ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, + ice_aqc_opc_set_phy_rec_clk_out = 0x0630, + ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2413,6 +2674,18 @@ enum ice_adminq_opc { ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* 1588/SyncE commands/events */ + ice_aqc_opc_get_cgu_abilities = 0x0C61, + ice_aqc_opc_set_cgu_input_config = 0x0C62, + ice_aqc_opc_get_cgu_input_config = 0x0C63, + ice_aqc_opc_set_cgu_output_config = 0x0C64, + ice_aqc_opc_get_cgu_output_config = 0x0C65, + ice_aqc_opc_get_cgu_dpll_status = 0x0C66, + ice_aqc_opc_set_cgu_dpll_config = 0x0C67, + ice_aqc_opc_set_cgu_ref_prio = 0x0C68, + ice_aqc_opc_get_cgu_ref_prio = 0x0C69, + ice_aqc_opc_get_cgu_info = 0x0C6A, + ice_aqc_opc_driver_shared_params = 0x0C90, /* Standalone Commands/Events */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 80deca45ab..edac34c796 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
*/ #include "ice_common.h" #include "ice_sched.h" @@ -8,6 +8,7 @@ #include "ice_ptp_hw.h" #define ICE_PF_RESET_WAIT_COUNT 300 +#define ICE_MAX_NETLIST_SIZE 10 static const char * const ice_link_mode_str_low[] = { [0] = "100BASE_TX", @@ -153,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E823L_SFP: hw->mac_type = ICE_MAC_GENERIC; break; + case ICE_DEV_ID_E830_BACKPLANE: + case ICE_DEV_ID_E830_QSFP56: + case ICE_DEV_ID_E830_SFP: + case ICE_DEV_ID_E830_SFP_DD: + hw->mac_type = ICE_MAC_E830; + break; default: hw->mac_type = ICE_MAC_UNKNOWN; break; @@ -435,6 +442,80 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } +/** + * ice_aq_get_netlist_node + * @hw: pointer to the hw struct + * @cmd: get_link_topo AQ structure + * @node_part_number: output node part number if node found + * @node_handle: output node handle parameter if node found + * + * Get netlist node handle. + */ +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + desc.params.get_link_topo = *cmd; + + if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) + return -EINTR; + + if (node_handle) + *node_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + if (node_part_number) + *node_part_number = desc.params.get_link_topo.node_part_num; + + return 0; +} + +/** + * ice_find_netlist_node + * @hw: pointer to the hw struct + * @node_type_ctx: type of netlist node to look for + * @node_part_number: node part number to look for + * @node_handle: output parameter if node found - optional + * + * Scan the netlist for a node handle of the given node type and part number. + * + * If node_handle is non-NULL it will be modified on function exit. It is only + * valid if the function returns zero, and should be ignored on any non-zero + * return value. + * + * Returns: 0 if the node is found, -ENOENT if no handle was found, and + * a negative error code on failure to access the AQ. + */ +static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, + u8 node_part_number, u16 *node_handle) +{ + u8 idx; + + for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) { + struct ice_aqc_get_link_topo cmd = {}; + u8 rec_node_part_number; + int status; + + cmd.addr.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, + node_type_ctx); + cmd.addr.topo_params.index = idx; + + status = ice_aq_get_netlist_node(hw, &cmd, + &rec_node_part_number, + node_handle); + if (status) + return status; + + if (rec_node_part_number == node_part_number) + return 0; + } + + return -ENOENT; +} + /** * ice_is_media_cage_present * @pi: port information structure @@ -570,6 +651,24 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) return ICE_MEDIA_UNKNOWN; } +/** + * ice_get_link_status_datalen + * @hw: pointer to the HW struct + * + * Returns datalength for the Get Link Status AQ command, which is bigger for + * newer adapter families handled by ice driver. 
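/*
 * Editor's illustrative sketch, not part of this patch: the V1/V2 data sizes
 * above are derived with offsetofend(), so ICE_AQC_LS_DATA_SIZE_V1 covers the
 * original layout up to phy_type_high while V2 also covers the new
 * link-partner fields; because the structure is __packed, V2 equals the full
 * structure size.  A compile-time check of that relationship (requires
 * <linux/build_bug.h>) could look like this:
 */
static_assert(ICE_AQC_LS_DATA_SIZE_V1 < ICE_AQC_LS_DATA_SIZE_V2);
static_assert(ICE_AQC_LS_DATA_SIZE_V2 ==
	      sizeof(struct ice_aqc_get_link_status_data));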
+ */ +static u16 ice_get_link_status_datalen(struct ice_hw *hw) +{ + switch (hw->mac_type) { + case ICE_MAC_E830: + return ICE_AQC_LS_DATA_SIZE_V2; + case ICE_MAC_E810: + default: + return ICE_AQC_LS_DATA_SIZE_V1; + } +} + /** * ice_aq_get_link_info * @pi: port information structure @@ -608,8 +707,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); - + status = ice_aq_send_cmd(hw, &desc, &link_data, + ice_get_link_status_datalen(hw), cd); if (status) return status; @@ -684,8 +783,7 @@ static void ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, struct ice_aqc_set_mac_cfg *cmd) { - u16 fc_thres_val, tx_timer_val; - u32 val; + u32 val, fc_thres_m; /* We read back the transmit timer and FC threshold value of * LFC. Thus, we will use index = @@ -694,19 +792,32 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * Also, because we are operating on transmit timer and FC * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ -#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX - - /* Retrieve the transmit timer */ - val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); - tx_timer_val = val & - PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; - cmd->tx_tmr_value = cpu_to_le16(tx_timer_val); - - /* Retrieve the FC threshold */ - val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); - fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; - - cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val); +#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX +#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR + + if (hw->mac_type == ICE_MAC_E830) { + /* Retrieve the transmit timer */ + val = rd32(hw, E830_PRTMAC_CL01_PS_QNT); + cmd->tx_tmr_value = + le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M); + + /* Retrieve the fc threshold */ + val = rd32(hw, E830_PRTMAC_CL01_QNT_THR); + fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M; + } else { + /* Retrieve the transmit timer */ + val = rd32(hw, + E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC)); + cmd->tx_tmr_value = + le16_encode_bits(val, + E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M); + + /* Retrieve the fc threshold */ + val = rd32(hw, + E800_REFRESH_TMR(E800_IDX_OF_LFC)); + fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M; + } + cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m); } /** @@ -2389,16 +2500,21 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, static void ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) { - u32 reg_val, val; + u32 reg_val, gsize, bsize; reg_val = rd32(hw, GLQF_FD_SIZE); - val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> - GLQF_FD_SIZE_FD_GSIZE_S; - func_p->fd_fltr_guar = - ice_get_num_per_func(hw, val); - val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> - GLQF_FD_SIZE_FD_BSIZE_S; - func_p->fd_fltr_best_effort = val; + switch (hw->mac_type) { + case ICE_MAC_E830: + gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); + bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); + break; + case ICE_MAC_E810: + default: + gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); + bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); + } + func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); + func_p->fd_fltr_best_effort = bsize; ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", func_p->fd_fltr_guar); @@ -2654,33 +2770,6 @@ 
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, ice_recalc_port_limited_caps(hw, &dev_p->common_cap); } -/** - * ice_aq_get_netlist_node - * @hw: pointer to the hw struct - * @cmd: get_link_topo AQ structure - * @node_part_number: output node part number if node found - * @node_handle: output node handle parameter if node found - */ -static int -ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, - u8 *node_part_number, u16 *node_handle) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - desc.params.get_link_topo = *cmd; - - if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) - return -EIO; - - if (node_handle) - *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle); - if (node_part_number) - *node_part_number = desc.params.get_link_topo.node_part_num; - - return 0; -} - /** * ice_is_pf_c827 - check if pf contains c827 phy * @hw: pointer to the hw struct @@ -2715,6 +2804,82 @@ bool ice_is_pf_c827(struct ice_hw *hw) return false; } +/** + * ice_is_phy_rclk_in_netlist + * @hw: pointer to the hw struct + * + * Check if the PHY Recovered Clock device is present in the netlist + */ +bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && + ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) + return false; + + return true; +} + +/** + * ice_is_clock_mux_in_netlist + * @hw: pointer to the hw struct + * + * Check if the Clock Multiplexer device is present in the netlist + */ +bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, + ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, + NULL)) + return false; + + return true; +} + +/** + * ice_is_cgu_in_netlist - check for CGU presence + * @hw: pointer to the hw struct + * + * Check if the Clock Generation Unit (CGU) device is present in the netlist. + * Save the CGU part number in the hw structure for later use. 
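/*
 * Editor's illustrative sketch, not part of this patch: a probe-time helper
 * could use the netlist checks above to decide whether to enable the
 * SyncE/DPLL features introduced earlier in this series (ICE_F_CGU,
 * ICE_F_PHY_RCLK, ICE_FLAG_DPLL).  ice_set_feature_support() is the driver's
 * existing helper for marking a feature as supported; the real wiring in the
 * driver may differ.
 */
static void ice_example_detect_clock_hw(struct ice_pf *pf)
{
	/* CGU present: SyncE/PTP DPLLs can be initialized */
	if (ice_is_cgu_in_netlist(&pf->hw)) {
		ice_set_feature_support(pf, ICE_F_CGU);
		set_bit(ICE_FLAG_DPLL, pf->flags);
	}

	/* PHY recovered clock pins present */
	if (ice_is_phy_rclk_in_netlist(&pf->hw))
		ice_set_feature_support(pf, ICE_F_PHY_RCLK);
}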
+ * Return: + * * true - cgu is present + * * false - cgu is not present + */ +bool ice_is_cgu_in_netlist(struct ice_hw *hw) +{ + if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, + NULL)) { + hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; + return true; + } else if (!ice_find_netlist_node(hw, + ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, + NULL)) { + hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; + return true; + } + + return false; +} + +/** + * ice_is_gps_in_netlist + * @hw: pointer to the hw struct + * + * Check if the GPS generic device is present in the netlist + */ +bool ice_is_gps_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, + ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) + return false; + + return true; +} + /** * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct @@ -4726,11 +4891,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - struct ice_aqc_dis_txq_item *qg_list; + DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); + u16 i, buf_size = __struct_size(qg_list); struct ice_q_ctx *q_ctx; int status = -ENOENT; struct ice_hw *hw; - u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return -EIO; @@ -4748,11 +4913,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, return -EIO; } - buf_size = struct_size(qg_list, q_id, 1); - qg_list = kzalloc(buf_size, GFP_KERNEL); - if (!qg_list) - return -ENOMEM; - mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -4785,7 +4945,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, q_ctx->q_teid = ICE_INVAL_TEID; } mutex_unlock(&pi->sched_lock); - kfree(qg_list); return status; } @@ -4954,10 +5113,10 @@ int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { - struct ice_aqc_dis_txq_item *qg_list; + DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); + u16 qg_size = __struct_size(qg_list); struct ice_hw *hw; int status = 0; - u16 qg_size; int i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) @@ -4965,11 +5124,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, hw = pi->hw; - qg_size = struct_size(qg_list, q_id, 1); - qg_list = kzalloc(qg_size, GFP_KERNEL); - if (!qg_list) - return -ENOMEM; - mutex_lock(&pi->sched_lock); for (i = 0; i < count; i++) { @@ -4994,7 +5148,393 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, } mutex_unlock(&pi->sched_lock); - kfree(qg_list); + return status; +} + +/** + * ice_aq_get_cgu_abilities - get cgu abilities + * @hw: pointer to the HW struct + * @abilities: CGU abilities + * + * Get CGU abilities (0x0C61) + * Return: 0 on success or negative value on failure. 
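/*
 * Editor's illustrative sketch, not part of this patch: querying the CGU
 * abilities once at init time to learn how many input/output pins and which
 * DPLL indices the clock generation unit exposes.  Error handling is reduced
 * to a bare return for brevity.
 */
static int ice_example_read_cgu_abilities(struct ice_hw *hw)
{
	struct ice_aqc_get_cgu_abilities abilities;
	int err;

	err = ice_aq_get_cgu_abilities(hw, &abilities);
	if (err)
		return err;

	ice_debug(hw, ICE_DBG_INIT,
		  "CGU: %u inputs, %u outputs, pps dpll %u, eec dpll %u\n",
		  abilities.num_inputs, abilities.num_outputs,
		  abilities.pps_dpll_idx, abilities.eec_dpll_idx);

	return 0;
}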
+ */ +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); + return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); +} + +/** + * ice_aq_set_input_pin_cfg - set input pin config + * @hw: pointer to the HW struct + * @input_idx: Input index + * @flags1: Input flags + * @flags2: Input flags + * @freq: Frequency in Hz + * @phase_delay: Delay in ps + * + * Set CGU input config (0x0C62) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay) +{ + struct ice_aqc_set_cgu_input_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); + cmd = &desc.params.set_cgu_input_config; + cmd->input_idx = input_idx; + cmd->flags1 = flags1; + cmd->flags2 = flags2; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_input_pin_cfg - get input pin config + * @hw: pointer to the HW struct + * @input_idx: Input index + * @status: Pin status + * @type: Pin type + * @flags1: Input flags + * @flags2: Input flags + * @freq: Frequency in Hz + * @phase_delay: Delay in ps + * + * Get CGU input config (0x0C63) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, + u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) +{ + struct ice_aqc_get_cgu_input_config *cmd; + struct ice_aq_desc desc; + int ret; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); + cmd = &desc.params.get_cgu_input_config; + cmd->input_idx = input_idx; + + ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!ret) { + if (status) + *status = cmd->status; + if (type) + *type = cmd->type; + if (flags1) + *flags1 = cmd->flags1; + if (flags2) + *flags2 = cmd->flags2; + if (freq) + *freq = le32_to_cpu(cmd->freq); + if (phase_delay) + *phase_delay = le32_to_cpu(cmd->phase_delay); + } + + return ret; +} + +/** + * ice_aq_set_output_pin_cfg - set output pin config + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Index of DPLL block + * @freq: Output frequency + * @phase_delay: Output phase compensation + * + * Set CGU output config (0x0C64) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 phase_delay) +{ + struct ice_aqc_set_cgu_output_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); + cmd = &desc.params.set_cgu_output_config; + cmd->output_idx = output_idx; + cmd->flags = flags; + cmd->src_sel = src_sel; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_output_pin_cfg - get output pin config + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Internal DPLL source + * @freq: Output frequency + * @src_freq: Source frequency + * + * Get CGU output config (0x0C65) + * Return: 0 on success or negative value on failure. 
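/*
 * Editor's illustrative sketch, not part of this patch: the src_sel byte
 * returned by the 0x0C65 command packs the index of the DPLL feeding this
 * output in its low five bits, per ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL
 * defined above.  The frequency pointers may be passed as NULL when the
 * caller only cares about the parent DPLL.
 */
static int ice_example_output_parent_dpll(struct ice_hw *hw, u8 output_idx,
					  u8 *parent_dpll)
{
	u8 flags, src_sel;
	int err;

	err = ice_aq_get_output_pin_cfg(hw, output_idx, &flags, &src_sel,
					NULL, NULL);
	if (err)
		return err;

	/* extract the parent DPLL index from the packed src_sel field */
	*parent_dpll = FIELD_GET(ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL, src_sel);

	return 0;
}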
+ */ +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq) +{ + struct ice_aqc_get_cgu_output_config *cmd; + struct ice_aq_desc desc; + int ret; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); + cmd = &desc.params.get_cgu_output_config; + cmd->output_idx = output_idx; + + ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!ret) { + if (flags) + *flags = cmd->flags; + if (src_sel) + *src_sel = cmd->src_sel; + if (freq) + *freq = le32_to_cpu(cmd->freq); + if (src_freq) + *src_freq = le32_to_cpu(cmd->src_freq); + } + + return ret; +} + +/** + * ice_aq_get_cgu_dpll_status - get dpll status + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @config: current DPLL config + * @dpll_state: current DPLL state + * @phase_offset: Phase offset in ns + * @eec_mode: EEC_mode + * + * Get CGU DPLL status (0x0C66) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u8 *dpll_state, u8 *config, s64 *phase_offset, + u8 *eec_mode) +{ + struct ice_aqc_get_cgu_dpll_status *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); + cmd = &desc.params.get_cgu_dpll_status; + cmd->dpll_num = dpll_num; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *ref_state = cmd->ref_state; + *dpll_state = cmd->dpll_state; + *config = cmd->config; + *phase_offset = le32_to_cpu(cmd->phase_offset_h); + *phase_offset <<= 32; + *phase_offset += le32_to_cpu(cmd->phase_offset_l); + *phase_offset = sign_extend64(*phase_offset, 47); + *eec_mode = cmd->eec_mode; + } + + return status; +} + +/** + * ice_aq_set_cgu_dpll_config - set dpll config + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @config: DPLL config + * @eec_mode: EEC mode + * + * Set CGU DPLL config (0x0C67) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode) +{ + struct ice_aqc_set_cgu_dpll_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); + cmd = &desc.params.set_cgu_dpll_config; + cmd->dpll_num = dpll_num; + cmd->ref_state = ref_state; + cmd->config = config; + cmd->eec_mode = eec_mode; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_set_cgu_ref_prio - set input reference priority + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_priority: Reference input priority + * + * Set CGU reference priority (0x0C68) + * Return: 0 on success or negative value on failure. 
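/*
 * Editor's illustrative sketch, not part of this patch: programming a simple
 * reference priority order for one DPLL, where a lower input index gets a
 * numerically lower (i.e. higher) priority.  num_inputs would typically come
 * from the CGU abilities response.
 */
static int ice_example_set_ref_priorities(struct ice_hw *hw, u8 dpll_num,
					  u8 num_inputs)
{
	u8 idx;

	for (idx = 0; idx < num_inputs; idx++) {
		int err = ice_aq_set_cgu_ref_prio(hw, dpll_num, idx, idx);

		if (err)
			return err;
	}

	return 0;
}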
+ */ +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority) +{ + struct ice_aqc_set_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); + cmd = &desc.params.set_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + cmd->ref_priority = ref_priority; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_cgu_ref_prio - get input reference priority + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_prio: Reference input priority + * + * Get CGU reference priority (0x0C69) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio) +{ + struct ice_aqc_get_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); + cmd = &desc.params.get_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *ref_prio = cmd->ref_priority; + + return status; +} + +/** + * ice_aq_get_cgu_info - get cgu info + * @hw: pointer to the HW struct + * @cgu_id: CGU ID + * @cgu_cfg_ver: CGU config version + * @cgu_fw_ver: CGU firmware version + * + * Get CGU info (0x0C6A) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver) +{ + struct ice_aqc_get_cgu_info *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); + cmd = &desc.params.get_cgu_info; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *cgu_id = le32_to_cpu(cmd->cgu_id); + *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); + *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); + } + + return status; +} + +/** + * ice_aq_set_phy_rec_clk_out - set RCLK phy out + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @enable: GPIO state to be applied + * @freq: PHY output frequency + * + * Set phy recovered clock as reference (0x0630) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq) +{ + struct ice_aqc_set_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); + cmd = &desc.params.set_phy_rec_clk_out; + cmd->phy_output = phy_output; + cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; + cmd->freq = cpu_to_le32(*freq); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *freq = le32_to_cpu(cmd->freq); + + return status; +} + +/** + * ice_aq_get_phy_rec_clk_out - get phy recovered signal info + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @port_num: Port number + * @flags: PHY flags + * @node_handle: PHY output frequency + * + * Get PHY recovered clock output info (0x0631) + * Return: 0 on success or negative value on failure. 
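/*
 * Editor's illustrative sketch, not part of this patch: enabling a PHY
 * recovered clock output on the current port and reading the state back.
 * The frequency argument of the set command is in/out: firmware may return
 * the frequency it actually programmed.
 */
static int ice_example_enable_rclk_out(struct ice_hw *hw, u8 phy_output,
				       u32 freq)
{
	u8 port, flags;
	int err;

	err = ice_aq_set_phy_rec_clk_out(hw, phy_output, true, &freq);
	if (err)
		return err;

	/* read back and confirm the output is reported as enabled */
	err = ice_aq_get_phy_rec_clk_out(hw, &phy_output, &port, &flags, NULL);
	if (err)
		return err;

	return (flags & ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN) ? 0 : -EIO;
}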
+ */ +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, + u8 *flags, u16 *node_handle) +{ + struct ice_aqc_get_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); + cmd = &desc.params.get_phy_rec_clk_out; + cmd->phy_output = *phy_output; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *phy_output = cmd->phy_output; + if (port_num) + *port_num = cmd->port_num; + if (flags) + *flags = cmd->flags; + if (node_handle) + *node_handle = le16_to_cpu(cmd->node_handle); + } + return status; } @@ -5267,81 +5807,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } -/** - * ice_aq_set_driver_param - Set driver parameter to share via firmware - * @hw: pointer to the HW struct - * @idx: parameter index to set - * @value: the value to set the parameter to - * @cd: pointer to command details structure or NULL - * - * Set the value of one of the software defined parameters. All PFs connected - * to this device can read the value using ice_aq_get_driver_param. - * - * Note that firmware provides no synchronization or locking, and will not - * save the parameter value during a device reset. It is expected that - * a single PF will write the parameter value, while all other PFs will only - * read it. - */ -int -ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 value, struct ice_sq_cd *cd) -{ - struct ice_aqc_driver_shared_params *cmd; - struct ice_aq_desc desc; - - if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return -EIO; - - cmd = &desc.params.drv_shared_params; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); - - cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; - cmd->param_indx = idx; - cmd->param_val = cpu_to_le32(value); - - return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); -} - -/** - * ice_aq_get_driver_param - Get driver parameter shared via firmware - * @hw: pointer to the HW struct - * @idx: parameter index to set - * @value: storage to return the shared parameter - * @cd: pointer to command details structure or NULL - * - * Get the value of one of the software defined parameters. - * - * Note that firmware provides no synchronization or locking. It is expected - * that only a single PF will write a given parameter. 
- */ -int -ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 *value, struct ice_sq_cd *cd) -{ - struct ice_aqc_driver_shared_params *cmd; - struct ice_aq_desc desc; - int status; - - if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return -EIO; - - cmd = &desc.params.drv_shared_params; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); - - cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; - cmd->param_indx = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); - if (status) - return status; - - *value = le32_to_cpu(cmd->param_val); - - return 0; -} - /** * ice_aq_set_gpio * @hw: pointer to the hw struct @@ -5643,6 +6108,7 @@ static const u32 ice_aq_to_link_speed[] = { SPEED_40000, SPEED_50000, SPEED_100000, /* BIT(10) */ + SPEED_200000, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 226b81f97a..31fdcac339 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -93,6 +93,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); bool ice_is_pf_c827(struct ice_hw *hw); +bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw); +bool ice_is_clock_mux_in_netlist(struct ice_hw *hw); +bool ice_is_cgu_in_netlist(struct ice_hw *hw); +bool ice_is_gps_in_netlist(struct ice_hw *hw); +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle); int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); @@ -196,6 +203,44 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities); +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay); +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, + u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay); +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 phase_delay); +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq); +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u8 *dpll_state, u8 *config, s64 *phase_offset, + u8 *eec_mode); +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode); +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority); +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio); +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver); + +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq); +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, + u8 *flags, u16 *node_handle); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); @@ -208,12 +253,6 @@ int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); int 
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 value, struct ice_sq_cd *cd); -int -ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 *value, struct ice_sq_cd *cd); -int ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd); int diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index b27ec93638..8b7504a9df 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -1201,23 +1201,120 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, } /** - * ice_dwnld_cfg_bufs + * ice_get_pkg_seg_by_idx + * @pkg_hdr: pointer to the package header to be searched + * @idx: index of segment + */ +static struct ice_generic_seg_hdr * +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + if (idx < le32_to_cpu(pkg_hdr->seg_count)) + return (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + + le32_to_cpu(pkg_hdr->seg_offset[idx])); + + return NULL; +} + +/** + * ice_is_signing_seg_at_idx - determine if segment is a signing segment + * @pkg_hdr: pointer to package header + * @idx: segment index + */ +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg; + + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return false; + + return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; +} + +/** + * ice_is_signing_seg_type_at_idx + * @pkg_hdr: pointer to package header + * @idx: segment index + * @seg_id: segment id that is expected + * @sign_type: signing type + * + * Determine if a segment is a signing segment of the correct type + */ +static bool +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, + u32 seg_id, u32 sign_type) +{ + struct ice_sign_seg *seg; + + if (!ice_is_signing_seg_at_idx(pkg_hdr, idx)) + return false; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + + if (seg && le32_to_cpu(seg->seg_id) == seg_id && + le32_to_cpu(seg->sign_type) == sign_type) + return true; + + return false; +} + +/** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) + return true; + + return false; +} + +/** + * ice_is_last_download_buffer + * @buf: pointer to current buffer header + * @idx: index of the buffer in the current sequence + * @count: the buffer count in the current sequence + * + * Note: this routine should only be called if the buffer is not the last buffer + */ +static bool +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) +{ + struct ice_buf *next_buf; + + if ((idx + 1) == count) + return true; + + /* A set metadata flag in the next buffer will signal that the current + * buffer will be the last buffer downloaded + */ + next_buf = ((struct ice_buf *)buf) + 1; + + return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); +} + +/** + * ice_dwnld_cfg_bufs_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array + * @start: buffer index of first buffer to download + * @count: the number of buffers to download + * @indicate_last: if true, then set last buffer flag on last buffer download * - * Obtains global config lock and downloads the package configuration buffers - * to 
the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. */ -static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, - struct ice_buf *bufs, u32 count) +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, + u32 count, bool indicate_last) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; enum ice_aq_err err; u32 offset, info, i; - int status; if (!bufs || !count) return ICE_DDP_PKG_ERR; @@ -1226,43 +1323,25 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, * then there are no buffers to be downloaded, and the operation is * considered a success. */ - bh = (struct ice_buf_hdr *)bufs; + bh = (struct ice_buf_hdr *)(bufs + start); if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) return ICE_DDP_PKG_SUCCESS; - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == -EALREADY) - return ICE_DDP_PKG_ALREADY_LOADED; - return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); - } - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); + bool last = false; + int status; - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); + bh = (struct ice_buf_hdr *)(bufs + start + i); - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (le16_to_cpu(bh->section_count)) - if (le32_to_cpu(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); + if (indicate_last) + last = ice_is_last_download_buffer(bh, i, count); status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, &offset, &info, NULL); /* Save AQ status from download package */ if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Pkg download failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); err = hw->adminq.sq_last_status; state = ice_map_aq_err_to_ddp_state(err); @@ -1273,50 +1352,231 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, break; } - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, - "Failed to set VLAN mode: err %d\n", status); + return state; +} + +/** + * ice_download_pkg_sig_seg - download a signature segment + * @hw: pointer to the hardware structure + * @seg: pointer to signature segment + */ +static enum ice_ddp_state +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +{ + return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, + le32_to_cpu(seg->buf_tbl.buf_count), + false); +} + +/** + * ice_download_pkg_config_seg - download a config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index + * @start: starting buffer + * @count: buffer count + * + * Note: idx must reference a ICE segment + */ +static enum ice_ddp_state +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx, u32 start, u32 count) +{ + struct ice_buf_table *bufs; + struct ice_seg *seg; + u32 buf_count; + + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return 
ICE_DDP_PKG_ERR; + + bufs = ice_find_buf_table(seg); + buf_count = le32_to_cpu(bufs->buf_count); + + if (start >= buf_count || start + count > buf_count) + return ICE_DDP_PKG_ERR; + + return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, + true); +} + +/** + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index (must be a signature segment) + * + * Note: idx must reference a signature segment + */ +static enum ice_ddp_state +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx) +{ + enum ice_ddp_state state; + struct ice_sign_seg *seg; + u32 conf_idx; + u32 start; + u32 count; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) { + state = ICE_DDP_PKG_ERR; + goto exit; + } + + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = le32_to_cpu(seg->signed_buf_start); + count = le32_to_cpu(seg->signed_buf_count); + + state = ice_download_pkg_sig_seg(hw, seg); + if (state) + goto exit; + + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + count); + +exit: + return state; +} + +/** + * ice_match_signing_seg - determine if a matching signing segment exists + * @pkg_hdr: pointer to package header + * @seg_id: segment id that is expected + * @sign_type: signing type + */ +static bool +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) +{ + u32 i; + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, + sign_type)) + return true; + } + + return false; +} + +/** + * ice_post_dwnld_pkg_actions - perform post download package actions + * @hw: pointer to the hardware structure + */ +static enum ice_ddp_state +ice_post_dwnld_pkg_actions(struct ice_hw *hw) +{ + int status; + + status = ice_set_vlan_mode(hw); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + return ICE_DDP_PKG_ERR; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_download_pkg_with_sig_seg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * + * Handles the download of a complete package. 
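 *
 * Editor's note (not part of the original patch text): this path walks every
 * segment in @pkg_hdr and, for each signing segment whose segment id and
 * signing type match what the device expects, downloads the signature
 * buffers first and then the range of configuration buffers referenced by
 * the signing segment (signed_seg_idx, signed_buf_start, signed_buf_count),
 * all under the global config lock, finishing with the usual post-download
 * actions.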
+ */ +static enum ice_ddp_state +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + enum ice_aq_err aq_err = hw->adminq.sq_last_status; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; + int status; + u32 i; + + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + state = ICE_DDP_PKG_ALREADY_LOADED; + else + state = ice_map_aq_err_to_ddp_state(aq_err); + return state; + } + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, + hw->pkg_sign_type)) + continue; + + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + if (state) + break; } + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + ice_release_global_cfg_lock(hw); return state; } /** - * ice_aq_get_pkg_info_list + * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array * - * Get Package Info List (0x0C43) + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. */ -static int ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - struct ice_aq_desc desc; + enum ice_ddp_state state; + struct ice_buf_hdr *bh; + int status; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + if (!bufs || !count) + return ICE_DDP_PKG_ERR; - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; } /** - * ice_download_pkg + * ice_download_pkg_without_sig_seg * @hw: pointer to the hardware structure * @ice_seg: pointer to the segment of the package to be downloaded * - * Handles the download of a complete package. + * Handles the download of a complete package without signature segment. 
*/ -static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, - struct ice_seg *ice_seg) +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; - int status; ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", ice_seg->hdr.seg_format_ver.major, @@ -1333,12 +1593,52 @@ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", le32_to_cpu(ice_buf_tbl->buf_count)); - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - le32_to_cpu(ice_buf_tbl->buf_count)); + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) +{ + enum ice_ddp_state state; + + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); ice_post_pkg_dwnld_vlan_mode_cfg(hw); - return status; + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); } /** @@ -1497,6 +1797,73 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, return NULL; } +/** + * ice_has_signing_seg - determine if package has a signing segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + */ +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); + + return seg_hdr ? 
true : false; +} + +/** + * ice_get_pkg_segment_id - get correct package segment id, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) +{ + u32 seg_id; + + switch (mac_type) { + case ICE_MAC_E830: + seg_id = SEGMENT_TYPE_ICE_E830; + break; + case ICE_MAC_GENERIC: + default: + seg_id = SEGMENT_TYPE_ICE_E810; + break; + } + + return seg_id; +} + +/** + * ice_get_pkg_sign_type - get package segment sign type, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) +{ + u32 sign_type; + + switch (mac_type) { + case ICE_MAC_E830: + sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB; + break; + case ICE_MAC_GENERIC: + default: + sign_type = SEGMENT_SIGN_TYPE_RSA2K; + break; + } + + return sign_type; +} + +/** + * ice_get_signing_req - get correct package requirements, based on device + * @hw: pointer to the hardware structure + */ +static void ice_get_signing_req(struct ice_hw *hw) +{ + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); +} + /** * ice_init_pkg_info * @hw: pointer to the hardware structure @@ -1512,7 +1879,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, if (!pkg_hdr) return ICE_DDP_PKG_ERR; - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); + ice_get_signing_req(hw); + + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); if (seg_hdr) { struct ice_meta_sect *meta; struct ice_pkg_enum state; @@ -1560,21 +1934,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, */ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_aqc_get_pkg_info_resp *pkg_info; - u16 size; + DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info, + ICE_PKG_CNT); + u16 size = __struct_size(pkg_info); u32 i; - size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = kzalloc(size, GFP_KERNEL); - if (!pkg_info) + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) return ICE_DDP_PKG_ERR; - if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { - state = ICE_DDP_PKG_ERR; - goto init_pkg_free_alloc; - } - for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; @@ -1604,10 +1971,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) pkg_info->pkg_info[i].name, flags); } -init_pkg_free_alloc: - kfree(pkg_info); - - return state; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1622,9 +1986,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { - struct ice_aqc_get_pkg_info_resp *pkg; + DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info, + ICE_PKG_CNT); + u16 size = __struct_size(pkg); enum ice_ddp_state state; - u16 size; u32 i; /* Check package version compatibility */ @@ -1635,7 +2000,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, } /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); @@ -1643,15 +2008,8 @@ static enum 
ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, } /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = kzalloc(size, GFP_KERNEL); - if (!pkg) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { - state = ICE_DDP_PKG_LOAD_ERROR; - goto fw_ddp_compat_free_alloc; - } + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) + return ICE_DDP_PKG_LOAD_ERROR; for (i = 0; i < le32_to_cpu(pkg->count); i++) { /* loop till we find the NVM package */ @@ -1668,8 +2026,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, /* done processing NVM package so break */ break; } -fw_ddp_compat_free_alloc: - kfree(pkg); + return state; } @@ -1809,6 +2166,11 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) if (state) return state; + /* must be a matching segment */ + if (hw->pkg_has_signing_seg && + !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type)) + return ICE_DDP_PKG_ERR; + /* before downloading the package, check package version for * compatibility with driver */ @@ -1818,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - state = ice_download_pkg(hw, seg); + state = ice_download_pkg(hw, pkg, seg); if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index abb5f32f2e..ff66c2ffb1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -98,10 +98,21 @@ struct ice_pkg_hdr { __le32 seg_offset[]; }; +/* Package signing algorithm types */ +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ +#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 + /* generic segment */ struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE 0x00000010 +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE_E810 0x00000010 +#define SEGMENT_TYPE_SIGNING 0x00001001 +#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 +#define SEGMENT_TYPE_ICE_E830 0x00000017 __le32 seg_type; struct ice_pkg_ver seg_format_ver; __le32 seg_size; @@ -163,6 +174,18 @@ struct ice_global_metadata_seg { #define ICE_MIN_S_SZ 1 #define ICE_MAX_S_SZ 4084 +struct ice_sign_seg { + struct ice_generic_seg_hdr hdr; + __le32 seg_id; + __le32 sign_type; + __le32 signed_seg_idx; + __le32 signed_buf_start; + __le32 signed_buf_count; +#define ICE_SIGN_SEG_RESERVED_COUNT 44 + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; + struct ice_buf_table buf_tbl; +}; + /* section information */ struct ice_section_entry { __le32 type; diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 6d560d1c74..a2d384dbfc 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
*/ #ifndef _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_ @@ -16,6 +16,14 @@ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D +/* Intel(R) Ethernet Controller E830-C for backplane */ +#define ICE_DEV_ID_E830_BACKPLANE 0x12D1 +/* Intel(R) Ethernet Controller E830-C for QSFP */ +#define ICE_DEV_ID_E830_QSFP56 0x12D2 +/* Intel(R) Ethernet Controller E830-C for SFP */ +#define ICE_DEV_ID_E830_SFP 0x12D3 +/* Intel(R) Ethernet Controller E830-C for SFP-DD */ +#define ICE_DEV_ID_E830_SFP_DD 0x12D4 /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c new file mode 100644 index 0000000000..86b180cb32 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -0,0 +1,2117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_trace.h" +#include + +#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50 +#define ICE_DPLL_PIN_IDX_INVALID 0xff +#define ICE_DPLL_RCLK_NUM_PER_PF 1 + +/** + * enum ice_dpll_pin_type - enumerate ice pin types: + * @ICE_DPLL_PIN_INVALID: invalid pin type + * @ICE_DPLL_PIN_TYPE_INPUT: input pin + * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin + * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin + */ +enum ice_dpll_pin_type { + ICE_DPLL_PIN_INVALID, + ICE_DPLL_PIN_TYPE_INPUT, + ICE_DPLL_PIN_TYPE_OUTPUT, + ICE_DPLL_PIN_TYPE_RCLK_INPUT, +}; + +static const char * const pin_type_name[] = { + [ICE_DPLL_PIN_TYPE_INPUT] = "input", + [ICE_DPLL_PIN_TYPE_OUTPUT] = "output", + [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input", +}; + +/** + * ice_dpll_pin_freq_set - set pin's frequency + * @pf: private board structure + * @pin: pointer to a pin + * @pin_type: type of pin being configured + * @freq: frequency to be set + * @extack: error reporting + * + * Set requested frequency on a pin. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - success + * * negative - error on AQ or wrong pin type given + */ +static int +ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, const u32 freq, + struct netlink_ext_ack *extack) +{ + u8 flags; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + flags = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ; + ret = ice_aq_set_input_pin_cfg(&pf->hw, pin->idx, flags, + pin->flags[0], freq, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ; + ret = ice_aq_set_output_pin_cfg(&pf->hw, pin->idx, flags, + 0, freq, 0); + break; + default: + return -EINVAL; + } + if (ret) { + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin freq:%u on pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + freq, pin->idx); + return ret; + } + pin->freq = freq; + + return 0; +} + +/** + * ice_dpll_frequency_set - wrapper for pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * @pin_type: type of pin being configured + * + * Wraps internal set frequency command on a pin. 
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u32 frequency, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_frequency_set - input pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * + * Wraps internal set frequency command on a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_input_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_frequency_set - output pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * + * Wraps internal set frequency command on a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_output_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_frequency_get - wrapper for pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * @pin_type: type of pin being configured + * + * Wraps internal get frequency command of a pin. 
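The frequency set/get callbacks in this file all reduce to one helper that takes pf->dplls.lock and then dispatches on the ice pin type before touching firmware. Below is a minimal, self-contained user-space sketch of that lock-and-dispatch shape; the demo_* names and the pthread mutex are illustrative stand-ins for the driver's ice_* helpers and pf->dplls.lock, not identifiers from this patch.

#include <pthread.h>
#include <stdio.h>

enum demo_pin_type { DEMO_PIN_INPUT, DEMO_PIN_OUTPUT };

struct demo_board {
	pthread_mutex_t lock;		/* stands in for pf->dplls.lock */
	unsigned int in_freq;
	unsigned int out_freq;
};

/* stands in for ice_dpll_pin_freq_set(): per-type hardware write */
static int demo_pin_freq_set(struct demo_board *b, enum demo_pin_type t,
			     unsigned int freq)
{
	switch (t) {
	case DEMO_PIN_INPUT:
		b->in_freq = freq;	/* real code: ice_aq_set_input_pin_cfg() */
		return 0;
	case DEMO_PIN_OUTPUT:
		b->out_freq = freq;	/* real code: ice_aq_set_output_pin_cfg() */
		return 0;
	default:
		return -1;
	}
}

/* stands in for ice_dpll_frequency_set(): serialize, then dispatch */
static int demo_frequency_set(struct demo_board *b, enum demo_pin_type t,
			      unsigned int freq)
{
	int ret;

	pthread_mutex_lock(&b->lock);
	ret = demo_pin_freq_set(b, t, freq);
	pthread_mutex_unlock(&b->lock);
	return ret;
}

int main(void)
{
	struct demo_board b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	demo_frequency_set(&b, DEMO_PIN_INPUT, 10000000);
	printf("input pin frequency: %u Hz\n", b.in_freq);
	return 0;
}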
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *frequency = p->freq; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_frequency_get - input pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * + * Wraps internal get frequency command of a input pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_input_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_frequency_get - output pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * + * Wraps internal get frequency command of a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_pin_enable - enable a pin on dplls + * @hw: board private hw structure + * @pin: pointer to a pin + * @pin_type: type of pin being enabled + * @extack: error reporting + * + * Enable a pin on both dplls. Store current state in pin->flags. 
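When ice_dpll_pin_enable()/ice_dpll_pin_disable() flip the enable bit, they first carry over the cached embedded-sync (ESYNC) flag from pin->flags so that toggling the pin does not silently drop embedded sync. A small self-contained sketch of that flag-preservation step is shown here; the DEMO_FLAG_* values are made up for illustration and do not match the real ICE_AQC_* bit positions.

#include <stdio.h>

#define DEMO_FLAG_ESYNC_EN	0x01
#define DEMO_FLAG_INPUT_EN	0x02

/* mirrors the input branch of ice_dpll_pin_enable(): keep ESYNC, add enable */
static unsigned char demo_enable_flags(unsigned char cached_flags)
{
	unsigned char flags = 0;

	if (cached_flags & DEMO_FLAG_ESYNC_EN)
		flags |= DEMO_FLAG_ESYNC_EN;
	flags |= DEMO_FLAG_INPUT_EN;
	return flags;
}

int main(void)
{
	printf("0x%x\n", demo_enable_flags(DEMO_FLAG_ESYNC_EN));	/* 0x3 */
	printf("0x%x\n", demo_enable_flags(0));				/* 0x2 */
	return 0;
}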
+ * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 flags = 0; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + break; + default: + return -EINVAL; + } + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to enable %s pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + + return ret; +} + +/** + * ice_dpll_pin_disable - disable a pin on dplls + * @hw: board private hw structure + * @pin: pointer to a pin + * @pin_type: type of pin being disabled + * @extack: error reporting + * + * Disable a pin on both dplls. Store current state in pin->flags. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 flags = 0; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + break; + default: + return -EINVAL; + } + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to disable %s pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + + return ret; +} + +/** + * ice_dpll_pin_state_update - update pin's state + * @pf: private board struct + * @pin: structure with pin attributes to be updated + * @pin_type: type of pin being updated + * @extack: error reporting + * + * Determine pin current state and frequency, then update struct + * holding the pin info. For input pin states are separated for each + * dpll, for rclk pins states are separated for each parent. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 parent, port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, + NULL, &pin->flags[0], + &pin->freq, NULL); + if (ret) + goto err; + if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) { + if (pin->pin) { + pin->state[pf->dplls.eec.dpll_idx] = + pin->pin == pf->dplls.eec.active_input ? + DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_SELECTABLE; + pin->state[pf->dplls.pps.dpll_idx] = + pin->pin == pf->dplls.pps.active_input ? 
+ DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_SELECTABLE; + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_SELECTABLE; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_SELECTABLE; + } + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + } + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx, + &pin->flags[0], NULL, + &pin->freq, NULL); + if (ret) + goto err; + if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) + pin->state[0] = DPLL_PIN_STATE_CONNECTED; + else + pin->state[0] = DPLL_PIN_STATE_DISCONNECTED; + break; + case ICE_DPLL_PIN_TYPE_RCLK_INPUT: + for (parent = 0; parent < pf->dplls.rclk.num_parents; + parent++) { + u8 p = parent; + + ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p, + &port_num, + &pin->flags[parent], + NULL); + if (ret) + goto err; + if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN & + pin->flags[parent]) + pin->state[parent] = DPLL_PIN_STATE_CONNECTED; + else + pin->state[parent] = + DPLL_PIN_STATE_DISCONNECTED; + } + break; + default: + return -EINVAL; + } + + return 0; +err: + if (extack) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to update %s pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + else + dev_err_ratelimited(ice_pf_to_dev(pf), + "err:%d %s failed to update %s pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + return ret; +} + +/** + * ice_dpll_hw_input_prio_set - set input priority value in hardware + * @pf: board private structure + * @dpll: ice dpll pointer + * @pin: ice pin pointer + * @prio: priority value being set on a dpll + * @extack: error reporting + * + * Internal wrapper for setting the priority in the hardware. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, + struct ice_dpll_pin *pin, const u32 prio, + struct netlink_ext_ack *extack) +{ + int ret; + + ret = ice_aq_set_cgu_ref_prio(&pf->hw, dpll->dpll_idx, pin->idx, + (u8)prio); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin prio:%u on pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + prio, pin->idx); + else + dpll->input_prio[pin->idx] = prio; + + return ret; +} + +/** + * ice_dpll_lock_status_get - get dpll lock status callback + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @status: on success holds dpll's lock status + * @extack: error reporting + * + * Dpll subsystem callback, provides dpll's lock status. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *status = d->dpll_state; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_mode_supported - check if dpll's working mode is supported + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @mode: mode to be checked for support + * @extack: error reporting + * + * Dpll subsystem callback. Provides information if working mode is supported + * by dpll. 
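ice_dpll_pin_state_update() reduces the hardware flags to one of three dpll subsystem states per dpll: a disabled input is DISCONNECTED on both dplls, while an enabled input is CONNECTED on the dpll currently using it as active input and SELECTABLE on the other. The self-contained sketch below restates only that core mapping (it skips the not-yet-registered pin case); the demo_* names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum demo_pin_state { DEMO_DISCONNECTED, DEMO_SELECTABLE, DEMO_CONNECTED };

/* mirrors the input-pin branch of ice_dpll_pin_state_update() for one dpll */
static enum demo_pin_state
demo_input_state(bool input_enabled, bool is_active_input_of_this_dpll)
{
	if (!input_enabled)
		return DEMO_DISCONNECTED;
	return is_active_input_of_this_dpll ? DEMO_CONNECTED : DEMO_SELECTABLE;
}

int main(void)
{
	printf("%d\n", demo_input_state(true, true));	/* CONNECTED */
	printf("%d\n", demo_input_state(true, false));	/* SELECTABLE */
	printf("%d\n", demo_input_state(false, true));	/* DISCONNECTED */
	return 0;
}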
+ * + * Return: + * * true - mode is supported + * * false - mode is not supported + */ +static bool ice_dpll_mode_supported(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_mode mode, + struct netlink_ext_ack *extack) +{ + if (mode == DPLL_MODE_AUTOMATIC) + return true; + + return false; +} + +/** + * ice_dpll_mode_get - get dpll's working mode + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @mode: on success holds current working mode of dpll + * @extack: error reporting + * + * Dpll subsystem callback. Provides working mode of dpll. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_mode *mode, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *mode = d->mode; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_pin_state_set - set pin's state on dpll + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @enable: if pin shalll be enabled + * @extack: error reporting + * @pin_type: type of a pin + * + * Set pin state on a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - OK or no change required + * * negative - error + */ +static int +ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + bool enable, struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + if (enable) + ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack); + else + ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack); + if (!ret) + ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_state_set - enable/disable output pin on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: dpll being configured + * @dpll_priv: private data pointer passed on dpll registration + * @state: state of pin to be set + * @extack: error reporting + * + * Dpll subsystem callback. Set given state on output type pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - successfully enabled mode + * * negative - failed to enable mode + */ +static int +ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + bool enable = state == DPLL_PIN_STATE_CONNECTED; + + return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_input_state_set - enable/disable input pin on dpll levice + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: dpll being configured + * @dpll_priv: private data pointer passed on dpll registration + * @state: state of pin to be set + * @extack: error reporting + * + * Dpll subsystem callback. Enables given mode on input type pin. 
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - successfully enabled mode + * * negative - failed to enable mode + */ +static int +ice_dpll_input_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + bool enable = state == DPLL_PIN_STATE_SELECTABLE; + + return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_pin_state_get - set pin's state on dpll + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * @pin_type: type of questioned pin + * + * Determine pin state set it on a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); + if (ret) + goto unlock; + if (pin_type == ICE_DPLL_PIN_TYPE_INPUT) + *state = p->state[d->dpll_idx]; + else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) + *state = p->state[0]; + ret = 0; +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_state_get - get output pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Check state of a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_output_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_input_state_get - get input pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Check state of a input pin. 
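The two state-set callbacks map the requested netlink pin state onto a plain enable/disable decision: an output pin is enabled when CONNECTED is requested, whereas an input pin is enabled when SELECTABLE is requested, since in the automatic mode used here the device itself promotes one selectable input to connected. A hedged, self-contained restatement of that mapping (demo_* names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum demo_req { DEMO_REQ_DISCONNECTED, DEMO_REQ_SELECTABLE, DEMO_REQ_CONNECTED };

/* mirrors ice_dpll_output_state_set(): only CONNECTED turns the output on */
static bool demo_output_enable(enum demo_req s)
{
	return s == DEMO_REQ_CONNECTED;
}

/* mirrors ice_dpll_input_state_set(): SELECTABLE makes the input a candidate */
static bool demo_input_enable(enum demo_req s)
{
	return s == DEMO_REQ_SELECTABLE;
}

int main(void)
{
	printf("output on CONNECTED:  %d\n", demo_output_enable(DEMO_REQ_CONNECTED));
	printf("input on SELECTABLE:  %d\n", demo_input_enable(DEMO_REQ_SELECTABLE));
	printf("input on CONNECTED:   %d\n", demo_input_enable(DEMO_REQ_CONNECTED));
	return 0;
}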
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_input_prio_get - get dpll's input prio + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @prio: on success - returns input priority on dpll + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting priority of a input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_input_prio_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 *prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *prio = d->input_prio[p->idx]; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_prio_set - set dpll input prio + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @prio: input priority to be set on dpll + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting priority of a input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_direction - callback for get input pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: holds input pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting direction of a input pin. + * + * Return: + * * 0 - success + */ +static int +ice_dpll_input_direction(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_INPUT; + + return 0; +} + +/** + * ice_dpll_output_direction - callback for get output pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: holds output pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting direction of an output pin. 
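Input priority handling follows a write-through pattern: the getter simply returns the cached d->input_prio[pin] under the lock, and the setter issues the firmware command first and refreshes the cache only on success, so the cache never gets ahead of the hardware. A small self-contained sketch of that pattern (demo_* names and the deliberately failing index are stand-ins):

#include <stdio.h>

#define DEMO_NUM_INPUTS 8

struct demo_dpll {
	unsigned char input_prio[DEMO_NUM_INPUTS];	/* cached priorities */
};

/* stands in for ice_aq_set_cgu_ref_prio(); pretend index 7 always fails */
static int demo_hw_set_prio(unsigned int pin, unsigned char prio)
{
	return pin == 7 ? -1 : 0;
}

/* mirrors ice_dpll_hw_input_prio_set(): write hardware, then cache */
static int demo_prio_set(struct demo_dpll *d, unsigned int pin,
			 unsigned char prio)
{
	int ret = demo_hw_set_prio(pin, prio);

	if (!ret)
		d->input_prio[pin] = prio;
	return ret;
}

int main(void)
{
	struct demo_dpll d = { { 0 } };

	demo_prio_set(&d, 2, 5);
	demo_prio_set(&d, 7, 9);	/* fails, cache for pin 7 stays 0 */
	printf("pin2=%u pin7=%u\n", d.input_prio[2], d.input_prio[7]);
	return 0;
}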
+ * + * Return: + * * 0 - success + */ +static int +ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_OUTPUT; + + return 0; +} + +/** + * ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: on success holds pin phase_adjust value + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting phase adjust value of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 *phase_adjust, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_pf *pf = p->pf; + + mutex_lock(&pf->dplls.lock); + *phase_adjust = p->phase_adjust; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_pin_phase_adjust_set - helper for setting a pin phase adjust value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * @type: type of a pin + * + * Helper for dpll subsystem callback. Handler for setting phase adjust value + * of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flag, flags_en = 0; + int ret; + + mutex_lock(&pf->dplls.lock); + switch (type) { + case ICE_DPLL_PIN_TYPE_INPUT: + flag = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY; + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN) + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, flag, flags_en, + 0, phase_adjust); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + flag = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE; + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN) + flag |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flag |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flag, 0, 0, + phase_adjust); + break; + default: + ret = -EINVAL; + } + if (!ret) + p->phase_adjust = phase_adjust; + mutex_unlock(&pf->dplls.lock); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + phase_adjust, p->idx, d->dpll_idx); + + return ret; +} + +/** + * ice_dpll_input_phase_adjust_set - callback for set input pin phase adjust + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer 
passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for setting phase adjust on input + * pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_phase_adjust_set - callback for set output pin phase adjust + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for setting phase adjust on output + * pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_OUTPUT); +} + +#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100 +#define ICE_DPLL_PHASE_OFFSET_FACTOR \ + (DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER) +/** + * ice_dpll_phase_offset_get - callback for get dpll phase shift value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_offset: on success holds pin phase_offset value + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting phase shift value between + * dpll's input and output. 
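ICE_DPLL_PHASE_OFFSET_FACTOR scales the phase offset cached from firmware (in units of 1/ICE_DPLL_PHASE_OFFSET_DIVIDER) up by DPLL_PHASE_OFFSET_DIVIDER / 100 so it is in the unit the dpll netlink API expects, and the value is reported only for the pin that is currently the active input; every other pin reads back zero. A self-contained sketch of that conversion, using an assumed stand-in value for the uAPI divider (see include/uapi/linux/dpll.h for the real constant):

#include <stdio.h>
#include <stdint.h>

/* stand-in for the dpll uAPI constant, assumed 1000 here for illustration */
#define DEMO_DPLL_PHASE_OFFSET_DIVIDER	1000
/* firmware-side divider used by this driver */
#define DEMO_ICE_PHASE_OFFSET_DIVIDER	100
#define DEMO_FACTOR \
	(DEMO_DPLL_PHASE_OFFSET_DIVIDER / DEMO_ICE_PHASE_OFFSET_DIVIDER)

/* mirrors ice_dpll_phase_offset_get(): scale only for the active input */
static int64_t demo_phase_offset(int64_t fw_offset, int pin_is_active_input)
{
	return pin_is_active_input ? fw_offset * DEMO_FACTOR : 0;
}

int main(void)
{
	/* 250 firmware units -> 2500 uAPI units under the assumed divider */
	printf("%lld\n", (long long)demo_phase_offset(250, 1));
	printf("%lld\n", (long long)demo_phase_offset(250, 0));
	return 0;
}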
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *phase_offset, struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + if (d->active_input == pin) + *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; + else + *phase_offset = 0; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @parent_pin: pin parent pointer + * @parent_pin_priv: parent private data pointer passed on pin registration + * @state: state to be set on pin + * @extack: error reporting + * + * Dpll subsystem callback, set a state of a rclk pin on a parent pin + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv; + bool enable = state == DPLL_PIN_STATE_CONNECTED; + struct ice_pf *pf = p->pf; + int ret = -EINVAL; + u32 hw_idx; + + mutex_lock(&pf->dplls.lock); + hw_idx = parent->idx - pf->dplls.base_rclk_idx; + if (hw_idx >= pf->dplls.num_inputs) + goto unlock; + + if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) || + (!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) { + NL_SET_ERR_MSG_FMT(extack, + "pin:%u state:%u on parent:%u already set", + p->idx, state, parent->idx); + goto unlock; + } + ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable, + &p->freq); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + state, p->idx, parent->idx); +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_rclk_state_on_pin_get - get a state of rclk pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @parent_pin: pin parent pointer + * @parent_pin_priv: pin parent priv data pointer passed on pin registration + * @state: on success holds pin state on parent pin + * @extack: error reporting + * + * dpll subsystem callback, get a state of a recovered clock pin. 
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv; + struct ice_pf *pf = p->pf; + int ret = -EINVAL; + u32 hw_idx; + + mutex_lock(&pf->dplls.lock); + hw_idx = parent->idx - pf->dplls.base_rclk_idx; + if (hw_idx >= pf->dplls.num_inputs) + goto unlock; + + ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT, + extack); + if (ret) + goto unlock; + + *state = p->state[hw_idx]; + ret = 0; +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +static const struct dpll_pin_ops ice_dpll_rclk_ops = { + .state_on_pin_set = ice_dpll_rclk_state_on_pin_set, + .state_on_pin_get = ice_dpll_rclk_state_on_pin_get, + .direction_get = ice_dpll_input_direction, +}; + +static const struct dpll_pin_ops ice_dpll_input_ops = { + .frequency_get = ice_dpll_input_frequency_get, + .frequency_set = ice_dpll_input_frequency_set, + .state_on_dpll_get = ice_dpll_input_state_get, + .state_on_dpll_set = ice_dpll_input_state_set, + .prio_get = ice_dpll_input_prio_get, + .prio_set = ice_dpll_input_prio_set, + .direction_get = ice_dpll_input_direction, + .phase_adjust_get = ice_dpll_pin_phase_adjust_get, + .phase_adjust_set = ice_dpll_input_phase_adjust_set, + .phase_offset_get = ice_dpll_phase_offset_get, +}; + +static const struct dpll_pin_ops ice_dpll_output_ops = { + .frequency_get = ice_dpll_output_frequency_get, + .frequency_set = ice_dpll_output_frequency_set, + .state_on_dpll_get = ice_dpll_output_state_get, + .state_on_dpll_set = ice_dpll_output_state_set, + .direction_get = ice_dpll_output_direction, + .phase_adjust_get = ice_dpll_pin_phase_adjust_get, + .phase_adjust_set = ice_dpll_output_phase_adjust_set, +}; + +static const struct dpll_device_ops ice_dpll_ops = { + .lock_status_get = ice_dpll_lock_status_get, + .mode_supported = ice_dpll_mode_supported, + .mode_get = ice_dpll_mode_get, +}; + +/** + * ice_generate_clock_id - generates unique clock_id for registering dpll. + * @pf: board private structure + * + * Generates unique (per board) clock_id for allocation and search of dpll + * devices in Linux dpll subsystem. + * + * Return: generated clock id for the board + */ +static u64 ice_generate_clock_id(struct ice_pf *pf) +{ + return pci_get_dsn(pf->pdev); +} + +/** + * ice_dpll_notify_changes - notify dpll subsystem about changes + * @d: pointer do dpll + * + * Once change detected appropriate event is submitted to the dpll subsystem. 
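ice_dpll_notify_changes() only emits netlink notifications on edges: each tracked field has a prev_* shadow, a notification is sent when the shadow and the current value differ, and the shadow is then updated so the same state is not reported twice. A self-contained sketch of that edge-detect idiom, with demo_notify() standing in for dpll_device_change_ntf()/dpll_pin_change_ntf():

#include <stdio.h>

struct demo_dpll {
	int dpll_state;
	int prev_dpll_state;
};

static void demo_notify(const char *what, int val)
{
	printf("notify: %s -> %d\n", what, val);	/* real code: *_change_ntf() */
}

/* mirrors the shape of ice_dpll_notify_changes(): notify only on change */
static void demo_notify_changes(struct demo_dpll *d)
{
	if (d->prev_dpll_state != d->dpll_state) {
		d->prev_dpll_state = d->dpll_state;
		demo_notify("dpll lock state", d->dpll_state);
	}
}

int main(void)
{
	struct demo_dpll d = { .dpll_state = 1, .prev_dpll_state = 0 };

	demo_notify_changes(&d);	/* fires once */
	demo_notify_changes(&d);	/* state unchanged, stays quiet */
	return 0;
}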
+ */ +static void ice_dpll_notify_changes(struct ice_dpll *d) +{ + bool pin_notified = false; + + if (d->prev_dpll_state != d->dpll_state) { + d->prev_dpll_state = d->dpll_state; + dpll_device_change_ntf(d->dpll); + } + if (d->prev_input != d->active_input) { + if (d->prev_input) + dpll_pin_change_ntf(d->prev_input); + d->prev_input = d->active_input; + if (d->active_input) { + dpll_pin_change_ntf(d->active_input); + pin_notified = true; + } + } + if (d->prev_phase_offset != d->phase_offset) { + d->prev_phase_offset = d->phase_offset; + if (!pin_notified && d->active_input) + dpll_pin_change_ntf(d->active_input); + } +} + +/** + * ice_dpll_update_state - update dpll state + * @pf: pf private structure + * @d: pointer to queried dpll device + * @init: if function called on initialization of ice dpll + * + * Poll current state of dpll from hw and update ice_dpll struct. + * + * Context: Called by kworker under pf->dplls.lock + * Return: + * * 0 - success + * * negative - AQ failure + */ +static int +ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init) +{ + struct ice_dpll_pin *p = NULL; + int ret; + + ret = ice_get_cgu_state(&pf->hw, d->dpll_idx, d->prev_dpll_state, + &d->input_idx, &d->ref_state, &d->eec_mode, + &d->phase_offset, &d->dpll_state); + + dev_dbg(ice_pf_to_dev(pf), + "update dpll=%d, prev_src_idx:%u, src_idx:%u, state:%d, prev:%d mode:%d\n", + d->dpll_idx, d->prev_input_idx, d->input_idx, + d->dpll_state, d->prev_dpll_state, d->mode); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "update dpll=%d state failed, ret=%d %s\n", + d->dpll_idx, ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); + return ret; + } + if (init) { + if (d->dpll_state == DPLL_LOCK_STATUS_LOCKED || + d->dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ) + d->active_input = pf->dplls.inputs[d->input_idx].pin; + p = &pf->dplls.inputs[d->input_idx]; + return ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, NULL); + } + if (d->dpll_state == DPLL_LOCK_STATUS_HOLDOVER || + d->dpll_state == DPLL_LOCK_STATUS_UNLOCKED) { + d->active_input = NULL; + if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) + p = &pf->dplls.inputs[d->input_idx]; + d->prev_input_idx = ICE_DPLL_PIN_IDX_INVALID; + d->input_idx = ICE_DPLL_PIN_IDX_INVALID; + if (!p) + return 0; + ret = ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, NULL); + } else if (d->input_idx != d->prev_input_idx) { + if (d->prev_input_idx != ICE_DPLL_PIN_IDX_INVALID) { + p = &pf->dplls.inputs[d->prev_input_idx]; + ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, + NULL); + } + if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) { + p = &pf->dplls.inputs[d->input_idx]; + d->active_input = p->pin; + ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, + NULL); + } + d->prev_input_idx = d->input_idx; + } + + return ret; +} + +/** + * ice_dpll_periodic_work - DPLLs periodic worker + * @work: pointer to kthread_work structure + * + * DPLLs periodic worker is responsible for polling state of dpll. 
+ * Context: Holds pf->dplls.lock + */ +static void ice_dpll_periodic_work(struct kthread_work *work) +{ + struct ice_dplls *d = container_of(work, struct ice_dplls, work.work); + struct ice_pf *pf = container_of(d, struct ice_pf, dplls); + struct ice_dpll *de = &pf->dplls.eec; + struct ice_dpll *dp = &pf->dplls.pps; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_update_state(pf, de, false); + if (!ret) + ret = ice_dpll_update_state(pf, dp, false); + if (ret) { + d->cgu_state_acq_err_num++; + /* stop rescheduling this worker */ + if (d->cgu_state_acq_err_num > + ICE_CGU_STATE_ACQ_ERR_THRESHOLD) { + dev_err(ice_pf_to_dev(pf), + "EEC/PPS DPLLs periodic work disabled\n"); + mutex_unlock(&pf->dplls.lock); + return; + } + } + mutex_unlock(&pf->dplls.lock); + ice_dpll_notify_changes(de); + ice_dpll_notify_changes(dp); + + /* Run twice a second or reschedule if update failed */ + kthread_queue_delayed_work(d->kworker, &d->work, + ret ? msecs_to_jiffies(10) : + msecs_to_jiffies(500)); +} + +/** + * ice_dpll_release_pins - release pins resources from dpll subsystem + * @pins: pointer to pins array + * @count: number of pins + * + * Release resources of given pins array in the dpll subsystem. + */ +static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count) +{ + int i; + + for (i = 0; i < count; i++) + dpll_pin_put(pins[i].pin); +} + +/** + * ice_dpll_get_pins - get pins from dpll subsystem + * @pf: board private structure + * @pins: pointer to pins array + * @start_idx: get starts from this pin idx value + * @count: number of pins + * @clock_id: clock_id of dpll device + * + * Get pins - allocate - in dpll subsystem, store them in pin field of given + * pins array. + * + * Return: + * * 0 - success + * * negative - allocation failure reason + */ +static int +ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins, + int start_idx, int count, u64 clock_id) +{ + int i, ret; + + for (i = 0; i < count; i++) { + pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE, + &pins[i].prop); + if (IS_ERR(pins[i].pin)) { + ret = PTR_ERR(pins[i].pin); + goto release_pins; + } + } + + return 0; + +release_pins: + while (--i >= 0) + dpll_pin_put(pins[i].pin); + return ret; +} + +/** + * ice_dpll_unregister_pins - unregister pins from a dpll + * @dpll: dpll device pointer + * @pins: pointer to pins array + * @ops: callback ops registered with the pins + * @count: number of pins + * + * Unregister pins of a given array of pins from given dpll device registered in + * dpll subsystem. + */ +static void +ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, + const struct dpll_pin_ops *ops, int count) +{ + int i; + + for (i = 0; i < count; i++) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); +} + +/** + * ice_dpll_register_pins - register pins with a dpll + * @dpll: dpll pointer to register pins with + * @pins: pointer to pins array + * @ops: callback ops registered with the pins + * @count: number of pins + * + * Register pins of a given array with given dpll in dpll subsystem. 
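The periodic worker's rescheduling policy can be read as a small pure function of the last poll result and a running error count: normally requeue in 500 ms (twice a second), after a failed poll retry in 10 ms, and once the error count passes ICE_CGU_STATE_ACQ_ERR_THRESHOLD stop requeueing altogether. A self-contained restatement, where a negative return means "do not reschedule" (demo_* names are illustrative):

#include <stdio.h>

#define DEMO_ACQ_ERR_THRESHOLD	50	/* as ICE_CGU_STATE_ACQ_ERR_THRESHOLD */

/*
 * Returns the delay in milliseconds until the next poll,
 * or -1 when the worker should stop rescheduling itself.
 */
static int demo_next_poll_ms(int last_poll_failed, int *err_num)
{
	if (last_poll_failed) {
		if (++(*err_num) > DEMO_ACQ_ERR_THRESHOLD)
			return -1;	/* periodic work disabled */
		return 10;		/* quick retry after a failure */
	}
	return 500;			/* normal cadence: twice a second */
}

int main(void)
{
	int errs = 0;

	printf("%d\n", demo_next_poll_ms(0, &errs));	/* 500 */
	printf("%d\n", demo_next_poll_ms(1, &errs));	/* 10 */
	return 0;
}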
+ * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, + const struct dpll_pin_ops *ops, int count) +{ + int ret, i; + + for (i = 0; i < count; i++) { + ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]); + if (ret) + goto unregister_pins; + } + + return 0; + +unregister_pins: + while (--i >= 0) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); + return ret; +} + +/** + * ice_dpll_deinit_direct_pins - deinitialize direct pins + * @cgu: if cgu is present and controlled by this NIC + * @pins: pointer to pins array + * @count: number of pins + * @ops: callback ops registered with the pins + * @first: dpll device pointer + * @second: dpll device pointer + * + * If cgu is owned unregister pins from given dplls. + * Release pins resources to the dpll subsystem. + */ +static void +ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count, + const struct dpll_pin_ops *ops, + struct dpll_device *first, + struct dpll_device *second) +{ + if (cgu) { + ice_dpll_unregister_pins(first, pins, ops, count); + ice_dpll_unregister_pins(second, pins, ops, count); + } + ice_dpll_release_pins(pins, count); +} + +/** + * ice_dpll_init_direct_pins - initialize direct pins + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * @pins: pointer to pins array + * @start_idx: on which index shall allocation start in dpll subsystem + * @count: number of pins + * @ops: callback ops registered with the pins + * @first: dpll device pointer + * @second: dpll device pointer + * + * Allocate directly connected pins of a given array in dpll subsystem. + * If cgu is owned register allocated pins with given dplls. + * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_init_direct_pins(struct ice_pf *pf, bool cgu, + struct ice_dpll_pin *pins, int start_idx, int count, + const struct dpll_pin_ops *ops, + struct dpll_device *first, struct dpll_device *second) +{ + int ret; + + ret = ice_dpll_get_pins(pf, pins, start_idx, count, pf->dplls.clock_id); + if (ret) + return ret; + if (cgu) { + ret = ice_dpll_register_pins(first, pins, ops, count); + if (ret) + goto release_pins; + ret = ice_dpll_register_pins(second, pins, ops, count); + if (ret) + goto unregister_first; + } + + return 0; + +unregister_first: + ice_dpll_unregister_pins(first, pins, ops, count); +release_pins: + ice_dpll_release_pins(pins, count); + return ret; +} + +/** + * ice_dpll_deinit_rclk_pin - release rclk pin resources + * @pf: board private structure + * + * Deregister rclk pin from parent pins and release resources in dpll subsystem. 
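Both ice_dpll_get_pins() and ice_dpll_register_pins() use the same partial-failure idiom: walk the array forward, and on the first error unwind only the entries that already succeeded with while (--i >= 0). A generic, self-contained sketch of that unwind shape; demo_acquire()/demo_release() are placeholders for dpll_pin_get()/dpll_pin_put() or the register/unregister pair.

#include <stdio.h>

static int demo_acquire(int i)
{
	return i == 3 ? -1 : 0;		/* pretend the fourth entry fails */
}

static void demo_release(int i)
{
	printf("release %d\n", i);
}

/* mirrors the unwind idiom in ice_dpll_get_pins()/ice_dpll_register_pins() */
static int demo_acquire_all(int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = demo_acquire(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		demo_release(i);	/* only entries 0..i-1 were acquired */
	return ret;
}

int main(void)
{
	return demo_acquire_all(8) ? 1 : 0;
}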
+ */ +static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) +{ + struct ice_dpll_pin *rclk = &pf->dplls.rclk; + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct dpll_pin *parent; + int i; + + for (i = 0; i < rclk->num_parents; i++) { + parent = pf->dplls.inputs[rclk->parent_idx[i]].pin; + if (!parent) + continue; + dpll_pin_on_pin_unregister(parent, rclk->pin, + &ice_dpll_rclk_ops, rclk); + } + if (WARN_ON_ONCE(!vsi || !vsi->netdev)) + return; + netdev_dpll_pin_clear(vsi->netdev); + dpll_pin_put(rclk->pin); +} + +/** + * ice_dpll_init_rclk_pins - initialize recovered clock pin + * @pf: board private structure + * @pin: pin to register + * @start_idx: on which index shall allocation start in dpll subsystem + * @ops: callback ops registered with the pins + * + * Allocate resource for recovered clock pin in dpll subsystem. Register the + * pin with the parents it has in the info. Register pin with the pf's main vsi + * netdev. + * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, + int start_idx, const struct dpll_pin_ops *ops) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct dpll_pin *parent; + int ret, i; + + ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF, + pf->dplls.clock_id); + if (ret) + return ret; + for (i = 0; i < pf->dplls.rclk.num_parents; i++) { + parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin; + if (!parent) { + ret = -ENODEV; + goto unregister_pins; + } + ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin, + ops, &pf->dplls.rclk); + if (ret) + goto unregister_pins; + } + if (WARN_ON((!vsi || !vsi->netdev))) + return -EINVAL; + netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + + return 0; + +unregister_pins: + while (i) { + parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin; + dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin, + &ice_dpll_rclk_ops, &pf->dplls.rclk); + } + ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF); + return ret; +} + +/** + * ice_dpll_deinit_pins - deinitialize direct pins + * @pf: board private structure + * @cgu: if cgu is controlled by this pf + * + * If cgu is owned unregister directly connected pins from the dplls. + * Release resources of directly connected pins from the dpll subsystem. + */ +static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu) +{ + struct ice_dpll_pin *outputs = pf->dplls.outputs; + struct ice_dpll_pin *inputs = pf->dplls.inputs; + int num_outputs = pf->dplls.num_outputs; + int num_inputs = pf->dplls.num_inputs; + struct ice_dplls *d = &pf->dplls; + struct ice_dpll *de = &d->eec; + struct ice_dpll *dp = &d->pps; + + ice_dpll_deinit_rclk_pin(pf); + if (cgu) { + ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops, + num_inputs); + ice_dpll_unregister_pins(de->dpll, inputs, &ice_dpll_input_ops, + num_inputs); + } + ice_dpll_release_pins(inputs, num_inputs); + if (cgu) { + ice_dpll_unregister_pins(dp->dpll, outputs, + &ice_dpll_output_ops, num_outputs); + ice_dpll_unregister_pins(de->dpll, outputs, + &ice_dpll_output_ops, num_outputs); + ice_dpll_release_pins(outputs, num_outputs); + } +} + +/** + * ice_dpll_init_pins - init pins and register pins with a dplls + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * + * Initialize directly connected pf's pins within pf's dplls in a Linux dpll + * subsystem. 
+ * + * Return: + * * 0 - success + * * negative - initialization failure reason + */ +static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu) +{ + u32 rclk_idx; + int ret; + + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0, + pf->dplls.num_inputs, + &ice_dpll_input_ops, + pf->dplls.eec.dpll, pf->dplls.pps.dpll); + if (ret) + return ret; + if (cgu) { + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs, + pf->dplls.num_inputs, + pf->dplls.num_outputs, + &ice_dpll_output_ops, + pf->dplls.eec.dpll, + pf->dplls.pps.dpll); + if (ret) + goto deinit_inputs; + } + rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id; + ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx, + &ice_dpll_rclk_ops); + if (ret) + goto deinit_outputs; + + return 0; +deinit_outputs: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs, + pf->dplls.num_outputs, + &ice_dpll_output_ops, pf->dplls.pps.dpll, + pf->dplls.eec.dpll); +deinit_inputs: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs, + &ice_dpll_input_ops, pf->dplls.pps.dpll, + pf->dplls.eec.dpll); + return ret; +} + +/** + * ice_dpll_deinit_dpll - deinitialize dpll device + * @pf: board private structure + * @d: pointer to ice_dpll + * @cgu: if cgu is present and controlled by this NIC + * + * If cgu is owned unregister the dpll from dpll subsystem. + * Release resources of dpll device from dpll subsystem. + */ +static void +ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu) +{ + if (cgu) + dpll_device_unregister(d->dpll, &ice_dpll_ops, d); + dpll_device_put(d->dpll); +} + +/** + * ice_dpll_init_dpll - initialize dpll device in dpll subsystem + * @pf: board private structure + * @d: dpll to be initialized + * @cgu: if cgu is present and controlled by this NIC + * @type: type of dpll being initialized + * + * Allocate dpll instance for this board in dpll subsystem, if cgu is controlled + * by this NIC, register dpll with the callback ops. + * + * Return: + * * 0 - success + * * negative - initialization failure reason + */ +static int +ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu, + enum dpll_type type) +{ + u64 clock_id = pf->dplls.clock_id; + int ret; + + d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE); + if (IS_ERR(d->dpll)) { + ret = PTR_ERR(d->dpll); + dev_err(ice_pf_to_dev(pf), + "dpll_device_get failed (%p) err=%d\n", d, ret); + return ret; + } + d->pf = pf; + if (cgu) { + ice_dpll_update_state(pf, d, true); + ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d); + if (ret) { + dpll_device_put(d->dpll); + return ret; + } + } + + return 0; +} + +/** + * ice_dpll_deinit_worker - deinitialize dpll kworker + * @pf: board private structure + * + * Stop dpll's kworker, release it's resources. + */ +static void ice_dpll_deinit_worker(struct ice_pf *pf) +{ + struct ice_dplls *d = &pf->dplls; + + kthread_cancel_delayed_work_sync(&d->work); + kthread_destroy_worker(d->kworker); +} + +/** + * ice_dpll_init_worker - Initialize DPLLs periodic worker + * @pf: board private structure + * + * Create and start DPLLs periodic worker. + * + * Context: Shall be called after pf->dplls.lock is initialized. 
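ice_dpll_init_dpll() separates obtaining a device handle from exposing it: every PF calls dpll_device_get() so its pins can reference the dpll, but only the PF that owns the CGU registers the device (and therefore its ops) with the subsystem. The trivial self-contained sketch below restates that control flow; the printf calls merely mark where the real dpll_device_get()/dpll_device_register() calls sit.

#include <stdbool.h>
#include <stdio.h>

/* mirrors ice_dpll_init_dpll(): get a handle always, register only as owner */
static int demo_init_dpll(bool owns_cgu)
{
	printf("get device handle\n");			/* dpll_device_get() */
	if (owns_cgu)
		printf("register device + ops\n");	/* dpll_device_register() */
	return 0;
}

int main(void)
{
	demo_init_dpll(true);	/* CGU owner: handle + registration */
	demo_init_dpll(false);	/* other PFs: handle only */
	return 0;
}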
+ * Return: + * * 0 - success + * * negative - create worker failure + */ +static int ice_dpll_init_worker(struct ice_pf *pf) +{ + struct ice_dplls *d = &pf->dplls; + struct kthread_worker *kworker; + + kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); + kworker = kthread_create_worker(0, "ice-dplls-%s", + dev_name(ice_pf_to_dev(pf))); + if (IS_ERR(kworker)) + return PTR_ERR(kworker); + d->kworker = kworker; + d->cgu_state_acq_err_num = 0; + kthread_queue_delayed_work(d->kworker, &d->work, 0); + + return 0; +} + +/** + * ice_dpll_init_info_direct_pins - initializes direct pins info + * @pf: board private structure + * @pin_type: type of pins being initialized + * + * Init information for directly connected pins, cache them in pf's pins + * structures. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int +ice_dpll_init_info_direct_pins(struct ice_pf *pf, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps; + int num_pins, i, ret = -EINVAL; + struct ice_hw *hw = &pf->hw; + struct ice_dpll_pin *pins; + unsigned long caps; + u8 freq_supp_num; + bool input; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + pins = pf->dplls.inputs; + num_pins = pf->dplls.num_inputs; + input = true; + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + pins = pf->dplls.outputs; + num_pins = pf->dplls.num_outputs; + input = false; + break; + default: + return -EINVAL; + } + + for (i = 0; i < num_pins; i++) { + caps = 0; + pins[i].idx = i; + pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input); + pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input); + if (input) { + ret = ice_aq_get_cgu_ref_prio(hw, de->dpll_idx, i, + &de->input_prio[i]); + if (ret) + return ret; + ret = ice_aq_get_cgu_ref_prio(hw, dp->dpll_idx, i, + &dp->input_prio[i]); + if (ret) + return ret; + caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | + DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); + pins[i].prop.phase_range.min = + pf->dplls.input_phase_adj_max; + pins[i].prop.phase_range.max = + -pf->dplls.input_phase_adj_max; + } else { + pins[i].prop.phase_range.min = + pf->dplls.output_phase_adj_max; + pins[i].prop.phase_range.max = + -pf->dplls.output_phase_adj_max; + ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); + if (ret) + return ret; + } + pins[i].prop.capabilities = caps; + ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); + if (ret) + return ret; + pins[i].prop.freq_supported = + ice_cgu_get_pin_freq_supp(hw, i, input, &freq_supp_num); + pins[i].prop.freq_supported_num = freq_supp_num; + pins[i].pf = pf; + } + + return ret; +} + +/** + * ice_dpll_init_info_rclk_pin - initializes rclk pin information + * @pf: board private structure + * + * Init information for rclk pin, cache them in pf->dplls.rclk. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf) +{ + struct ice_dpll_pin *pin = &pf->dplls.rclk; + + pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT; + pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + pin->pf = pf; + + return ice_dpll_pin_state_update(pf, pin, + ICE_DPLL_PIN_TYPE_RCLK_INPUT, NULL); +} + +/** + * ice_dpll_init_pins_info - init pins info wrapper + * @pf: board private structure + * @pin_type: type of pins being initialized + * + * Wraps functions for pin initialization. 
+ * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int +ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type) +{ + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + case ICE_DPLL_PIN_TYPE_OUTPUT: + return ice_dpll_init_info_direct_pins(pf, pin_type); + case ICE_DPLL_PIN_TYPE_RCLK_INPUT: + return ice_dpll_init_info_rclk_pin(pf); + default: + return -EINVAL; + } +} + +/** + * ice_dpll_deinit_info - release memory allocated for pins info + * @pf: board private structure + * + * Release memory allocated for pins by ice_dpll_init_info function. + */ +static void ice_dpll_deinit_info(struct ice_pf *pf) +{ + kfree(pf->dplls.inputs); + kfree(pf->dplls.outputs); + kfree(pf->dplls.eec.input_prio); + kfree(pf->dplls.pps.input_prio); +} + +/** + * ice_dpll_init_info - prepare pf's dpll information structure + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * + * Acquire (from HW) and set basic dpll information (on pf->dplls struct). + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info(struct ice_pf *pf, bool cgu) +{ + struct ice_aqc_get_cgu_abilities abilities; + struct ice_dpll *de = &pf->dplls.eec; + struct ice_dpll *dp = &pf->dplls.pps; + struct ice_dplls *d = &pf->dplls; + struct ice_hw *hw = &pf->hw; + int ret, alloc_size, i; + + d->clock_id = ice_generate_clock_id(pf); + ret = ice_aq_get_cgu_abilities(hw, &abilities); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read cgu abilities\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); + return ret; + } + + de->dpll_idx = abilities.eec_dpll_idx; + dp->dpll_idx = abilities.pps_dpll_idx; + d->num_inputs = abilities.num_inputs; + d->num_outputs = abilities.num_outputs; + d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj); + d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj); + + alloc_size = sizeof(*d->inputs) * d->num_inputs; + d->inputs = kzalloc(alloc_size, GFP_KERNEL); + if (!d->inputs) + return -ENOMEM; + + alloc_size = sizeof(*de->input_prio) * d->num_inputs; + de->input_prio = kzalloc(alloc_size, GFP_KERNEL); + if (!de->input_prio) + return -ENOMEM; + + dp->input_prio = kzalloc(alloc_size, GFP_KERNEL); + if (!dp->input_prio) + return -ENOMEM; + + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_INPUT); + if (ret) + goto deinit_info; + + if (cgu) { + alloc_size = sizeof(*d->outputs) * d->num_outputs; + d->outputs = kzalloc(alloc_size, GFP_KERNEL); + if (!d->outputs) { + ret = -ENOMEM; + goto deinit_info; + } + + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT); + if (ret) + goto deinit_info; + } + + ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx, + &pf->dplls.rclk.num_parents); + if (ret) + return ret; + for (i = 0; i < pf->dplls.rclk.num_parents; i++) + pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i; + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT); + if (ret) + return ret; + de->mode = DPLL_MODE_AUTOMATIC; + dp->mode = DPLL_MODE_AUTOMATIC; + + dev_dbg(ice_pf_to_dev(pf), + "%s - success, inputs:%u, outputs:%u rclk-parents:%u\n", + __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents); + + return 0; + +deinit_info: + dev_err(ice_pf_to_dev(pf), + "%s - fail: d->inputs:%p, de->input_prio:%p, dp->input_prio:%p, d->outputs:%p\n", + __func__, d->inputs, de->input_prio, + dp->input_prio, d->outputs); + ice_dpll_deinit_info(pf); + return ret; +} + +/** + * ice_dpll_deinit - Disable the driver/HW 
support for dpll subsystem + * the dpll device. + * @pf: board private structure + * + * Handles the cleanup work required after dpll initialization, freeing + * resources and unregistering the dpll, pin and all resources used for + * handling them. + * + * Context: Destroys pf->dplls.lock mutex. Call only if ICE_FLAG_DPLL was set. + */ +void ice_dpll_deinit(struct ice_pf *pf) +{ + bool cgu = ice_is_feature_supported(pf, ICE_F_CGU); + + clear_bit(ICE_FLAG_DPLL, pf->flags); + if (cgu) + ice_dpll_deinit_worker(pf); + + ice_dpll_deinit_pins(pf, cgu); + ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu); + ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu); + ice_dpll_deinit_info(pf); + mutex_destroy(&pf->dplls.lock); +} + +/** + * ice_dpll_init - initialize support for dpll subsystem + * @pf: board private structure + * + * Set up the device dplls, register them and pins connected within Linux dpll + * subsystem. Allow userspace to obtain state of DPLL and handling of DPLL + * configuration requests. + * + * Context: Initializes pf->dplls.lock mutex. + */ +void ice_dpll_init(struct ice_pf *pf) +{ + bool cgu = ice_is_feature_supported(pf, ICE_F_CGU); + struct ice_dplls *d = &pf->dplls; + int err = 0; + + err = ice_dpll_init_info(pf, cgu); + if (err) + goto err_exit; + err = ice_dpll_init_dpll(pf, &pf->dplls.eec, cgu, DPLL_TYPE_EEC); + if (err) + goto deinit_info; + err = ice_dpll_init_dpll(pf, &pf->dplls.pps, cgu, DPLL_TYPE_PPS); + if (err) + goto deinit_eec; + err = ice_dpll_init_pins(pf, cgu); + if (err) + goto deinit_pps; + mutex_init(&d->lock); + if (cgu) { + err = ice_dpll_init_worker(pf); + if (err) + goto deinit_pins; + } + set_bit(ICE_FLAG_DPLL, pf->flags); + + return; + +deinit_pins: + ice_dpll_deinit_pins(pf, cgu); +deinit_pps: + ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu); +deinit_eec: + ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu); +deinit_info: + ice_dpll_deinit_info(pf); +err_exit: + mutex_destroy(&d->lock); + dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err); +} diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h new file mode 100644 index 0000000000..93172e9399 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2022, Intel Corporation. 
*/ + +#ifndef _ICE_DPLL_H_ +#define _ICE_DPLL_H_ + +#include "ice.h" + +#define ICE_DPLL_RCLK_NUM_MAX 4 + +/** ice_dpll_pin - store info about pins + * @pin: dpll pin structure + * @pf: pointer to pf, which has registered the dpll_pin + * @idx: ice pin private idx + * @num_parents: holds number of parent pins + * @parent_idx: holds indexes of parent pins + * @flags: pin flags returned from HW + * @state: state of a pin + * @prop: pin properties + * @freq: current frequency of a pin + * @phase_adjust: current phase adjust value + */ +struct ice_dpll_pin { + struct dpll_pin *pin; + struct ice_pf *pf; + u8 idx; + u8 num_parents; + u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX]; + u8 flags[ICE_DPLL_RCLK_NUM_MAX]; + u8 state[ICE_DPLL_RCLK_NUM_MAX]; + struct dpll_pin_properties prop; + u32 freq; + s32 phase_adjust; +}; + +/** ice_dpll - store info required for DPLL control + * @dpll: pointer to dpll dev + * @pf: pointer to pf, which has registered the dpll_device + * @dpll_idx: index of dpll on the NIC + * @input_idx: currently selected input index + * @prev_input_idx: previously selected input index + * @ref_state: state of dpll reference signals + * @eec_mode: eec_mode dpll is configured for + * @phase_offset: phase offset of active pin vs dpll signal + * @prev_phase_offset: previous phase offset of active pin vs dpll signal + * @input_prio: priorities of each input + * @dpll_state: current dpll sync state + * @prev_dpll_state: last dpll sync state + * @active_input: pointer to active input pin + * @prev_input: pointer to previous active input pin + */ +struct ice_dpll { + struct dpll_device *dpll; + struct ice_pf *pf; + u8 dpll_idx; + u8 input_idx; + u8 prev_input_idx; + u8 ref_state; + u8 eec_mode; + s64 phase_offset; + s64 prev_phase_offset; + u8 *input_prio; + enum dpll_lock_status dpll_state; + enum dpll_lock_status prev_dpll_state; + enum dpll_mode mode; + struct dpll_pin *active_input; + struct dpll_pin *prev_input; +}; + +/** ice_dplls - store info required for CCU (clock controlling unit) + * @kworker: periodic worker + * @work: periodic work + * @lock: locks access to configuration of a dpll + * @eec: pointer to EEC dpll dev + * @pps: pointer to PPS dpll dev + * @inputs: input pins pointer + * @outputs: output pins pointer + * @rclk: recovered pins pointer + * @num_inputs: number of input pins available on dpll + * @num_outputs: number of output pins available on dpll + * @cgu_state_acq_err_num: number of errors returned during periodic work + * @base_rclk_idx: idx of first pin used for clock recovery pins + * @clock_id: clock_id of dplls + * @input_phase_adj_max: max phase adjust value for input pins + * @output_phase_adj_max: max phase adjust value for output pins + */ +struct ice_dplls { + struct kthread_worker *kworker; + struct kthread_delayed_work work; + struct mutex lock; + struct ice_dpll eec; + struct ice_dpll pps; + struct ice_dpll_pin *inputs; + struct ice_dpll_pin *outputs; + struct ice_dpll_pin rclk; + u8 num_inputs; + u8 num_outputs; + int cgu_state_acq_err_num; + u8 base_rclk_idx; + u64 clock_id; + s32 input_phase_adj_max; + s32 output_phase_adj_max; +}; + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +void ice_dpll_init(struct ice_pf *pf); +void ice_dpll_deinit(struct ice_pf *pf); +#else +static inline void ice_dpll_init(struct ice_pf *pf) { } +static inline void ice_dpll_deinit(struct ice_pf *pf) { } +#endif + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index 67bfd1f61c..6ae0269bdf 100644 ---
a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -73,7 +73,7 @@ ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info, rule_info->sw_act.vsi_handle = vf_vsi_idx; rule_info->sw_act.flag |= ICE_FLTR_RX; rule_info->sw_act.src = pf_id; - rule_info->priority = 5; + rule_info->priority = 2; } static void @@ -84,7 +84,7 @@ ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info, rule_info->sw_act.flag |= ICE_FLTR_TX; rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; rule_info->flags_info.act_valid = true; - rule_info->priority = 5; + rule_info->priority = 2; } static int @@ -207,7 +207,7 @@ ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx, rule_info.allow_pass_l2 = true; rule_info.sw_act.vsi_handle = vsi_idx; rule_info.sw_act.fltr_act = ICE_NOP; - rule_info.priority = 5; + rule_info.priority = 2; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule); if (err) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 9be13e9840..bde9bc74f9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -345,6 +345,88 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) +static const u32 ice_adv_lnk_speed_100[] __initconst = { + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_1000[] __initconst = { + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_2500[] __initconst = { + ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_5000[] __initconst = { + ETHTOOL_LINK_MODE_5000baseT_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_10000[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_25000[] __initconst = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_40000[] __initconst = { + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_50000[] __initconst = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_100000[] __initconst = { + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_200000[] __initconst = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = { + 
ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000), +}; + +void __init ice_adv_lnk_speed_maps_init(void) +{ + ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps, + ARRAY_SIZE(ice_adv_lnk_speed_maps)); +} + static void __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, struct ice_vsi *vsi) @@ -1060,7 +1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ICE_VSI_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ice_gstrings_vsi_stats[i].stat_string); if (ice_is_port_repr_netdev(netdev)) @@ -1080,7 +1162,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, return; for (i = 0; i < ICE_PF_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { @@ -1097,7 +1179,8 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) - ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name); + ethtool_sprintf(&p, "%s", + ice_gstrings_priv_flags[i].name); break; default: break; @@ -1638,6 +1721,15 @@ ice_get_ethtool_stats(struct net_device *netdev, ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ ICE_PHY_TYPE_HIGH_100G_AUI2) +#define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \ + ICE_PHY_TYPE_HIGH_200G_SR4 | \ + ICE_PHY_TYPE_HIGH_200G_FR4 | \ + ICE_PHY_TYPE_HIGH_200G_LR4 | \ + ICE_PHY_TYPE_HIGH_200G_DR4 | \ + ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \ + ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \ + ICE_PHY_TYPE_HIGH_200G_AUI4) + /** * ice_mask_min_supported_speeds * @hw: pointer to the HW structure @@ -1652,8 +1744,9 @@ ice_mask_min_supported_speeds(struct ice_hw *hw, u64 phy_types_high, u64 *phy_types_low) { /* if QSFP connection with 100G speed, minimum supported speed is 25G */ - if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G || - phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) + if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) || + (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) || + (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G)) *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; else if (!ice_is_100m_speed_supported(hw)) *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; @@ -1796,6 +1889,9 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, ice_phy_type_to_ethtool(netdev, ks); switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; case ICE_AQ_LINK_SPEED_100GB: ks->base.speed = SPEED_100000; break; @@ -2007,6 +2103,55 @@ done: return err; } +/** + * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed + * @speed: ethtool forced speed + */ +static u16 ice_speed_to_aq_link(int speed) +{ + int aq_speed; + + switch (speed) { + case SPEED_10: + aq_speed = ICE_AQ_LINK_SPEED_10MB; + break; + case SPEED_100: + aq_speed = ICE_AQ_LINK_SPEED_100MB; + break; + case SPEED_1000: + aq_speed = ICE_AQ_LINK_SPEED_1000MB; + break; + case SPEED_2500: + 
aq_speed = ICE_AQ_LINK_SPEED_2500MB; + break; + case SPEED_5000: + aq_speed = ICE_AQ_LINK_SPEED_5GB; + break; + case SPEED_10000: + aq_speed = ICE_AQ_LINK_SPEED_10GB; + break; + case SPEED_20000: + aq_speed = ICE_AQ_LINK_SPEED_20GB; + break; + case SPEED_25000: + aq_speed = ICE_AQ_LINK_SPEED_25GB; + break; + case SPEED_40000: + aq_speed = ICE_AQ_LINK_SPEED_40GB; + break; + case SPEED_50000: + aq_speed = ICE_AQ_LINK_SPEED_50GB; + break; + case SPEED_100000: + aq_speed = ICE_AQ_LINK_SPEED_100GB; + break; + default: + aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + return aq_speed; +} + /** * ice_ksettings_find_adv_link_speed - Find advertising link speed * @ks: ethtool ksettings @@ -2014,73 +2159,14 @@ done: static u16 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) { + const struct ethtool_forced_speed_map *map; u16 adv_link_speed = 0; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 100baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseX_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseKX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 5000baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_5GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseKR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseSR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseLR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseCR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseSR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseKR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_25GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseCR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseSR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseLR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseKR4_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseCR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseKR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseSR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseCR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseSR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseLR4_ER4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseCR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseSR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; + for 
(u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) { + map = ice_adv_lnk_speed_maps + i; + if (linkmode_intersects(ks->link_modes.advertising, map->caps)) + adv_link_speed |= ice_speed_to_aq_link(map->speed); + } return adv_link_speed; } @@ -3285,7 +3371,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = ice_get_ptp_clock_index(pf); + info->phc_index = ice_ptp_clock_index(pf); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index b403ee79cd..b88e3da06f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -100,6 +100,14 @@ phy_type_high_lkup[] = { [2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), [3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full), [4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), + [5] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full), + [6] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full), + [7] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full), + [8] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full), + [9] = ICE_PHY_TYPE(200GB, 200000baseDR4_Full), + [10] = ICE_PHY_TYPE(200GB, 200000baseKR4_Full), + [11] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full), + [12] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full), }; #endif /* !_ICE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 8c6e13f87b..d151e5bacf 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2018-2020, Intel Corporation. */ +/* Copyright (C) 2018-2023, Intel Corporation. 
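The block above replaces the old chain of ethtool_link_ksettings_test_link_mode() checks with a table of ethtool_forced_speed_map entries: each entry lists the link-mode bits for one speed, ethtool_forced_speed_maps_init() turns those bit lists into linkmode bitmaps at module init, and the lookup reduces to one linkmode_intersects() test per speed. A condensed sketch of the same pattern follows; the my_speed_* names and the single 10G entry are hypothetical.

/* Hypothetical single-entry forced-speed map and the lookup that replaces
 * a long chain of ethtool_link_ksettings_test_link_mode() calls.
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/linkmode.h>

static const u32 my_speed_10000[] = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
};

static struct ethtool_forced_speed_map my_speed_maps[] = {
	ETHTOOL_FORCED_SPEED_MAP(my_speed, 10000),
};

/* Called once at module init: converts each entry's bit list into the
 * .caps linkmode bitmap used by the intersection test below.
 */
static void my_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(my_speed_maps,
				       ARRAY_SIZE(my_speed_maps));
}

/* True when any advertised link mode falls into the 10G map entry. */
static bool my_advertises_10g(const struct ethtool_link_ksettings *ks)
{
	return linkmode_intersects(ks->link_modes.advertising,
				   my_speed_maps[0].caps);
}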
*/ /* flow director ethtool support for ice */ @@ -540,16 +540,24 @@ static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) /* total guaranteed filters assigned to this VSI */ num_guar = vsi->num_gfltr; - /* minus the guaranteed filters programed by this VSI */ - num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) & - VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S; - /* total global best effort filters */ num_be = hw->func_caps.fd_fltr_best_effort; - /* minus the global best effort filters programmed */ - num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >> - GLQF_FD_CNT_FD_BCNT_S; + /* Subtract the number of programmed filters from the global values */ + switch (hw->mac_type) { + case ICE_MAC_E830: + num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, + rd32(hw, VSIQF_FD_CNT(vsi_num))); + num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M, + rd32(hw, GLQF_FD_CNT)); + break; + case ICE_MAC_E810: + default: + num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, + rd32(hw, VSIQF_FD_CNT(vsi_num))); + num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M, + rd32(hw, GLQF_FD_CNT)); + } return num_guar + num_be; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 85cca572c2..fb8b925aaf 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1318,7 +1318,6 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, list_del(&entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), entry->entry); devm_kfree(ice_hw_to_dev(hw), entry); return 0; @@ -1645,10 +1644,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, *entry_h = ICE_FLOW_ENTRY_HNDL(e); out: - if (status && e) { - devm_kfree(ice_hw_to_dev(hw), e->entry); + if (status) devm_kfree(ice_hw_to_dev(hw), e); - } return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index b465d27d9b..96923ef0a5 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -350,11 +350,8 @@ struct ice_flow_entry { u64 id; struct ice_flow_prof *prof; - /* Flow entry's content */ - void *entry; enum ice_flow_priority priority; u16 vsi_handle; - u16 entry_sz; }; #define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e) diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 75c9de675f..c8ea1af51a 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -389,6 +389,9 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw) if (!hw->func_caps.ts_func_info.src_tmr_owned) return false; + if (!ice_is_gps_in_netlist(hw)) + return false; + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) if (ice_is_e810t(hw)) { int err; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 531cc21947..86936b758a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
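The filter-counter rework above (ice_fdir_num_avail_fltr) reads the guaranteed and best-effort counts with FIELD_GET() and GENMASK()-defined masks, so each MAC generation's field width is applied through the same extraction helper instead of open-coded mask-and-shift pairs. A minimal sketch of the idiom follows; the MY_* masks and register layout are invented for illustration.

/* Hypothetical 32-bit counter register: the guaranteed count occupies
 * bits 14:0 on one MAC generation and bits 15:0 on the next.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_FD_CNT_GCNT_OLD_M	GENMASK(14, 0)
#define MY_FD_CNT_GCNT_NEW_M	GENMASK(15, 0)

static u32 my_get_guar_cnt(u32 reg, bool new_mac)
{
	/* FIELD_GET() masks the register and shifts the field down to bit 0,
	 * replacing the old "(reg & MASK) >> SHIFT" open coding.
	 */
	if (new_mac)
		return FIELD_GET(MY_FD_CNT_GCNT_NEW_M, reg);
	return FIELD_GET(MY_FD_CNT_GCNT_OLD_M, reg);
}

/* FIELD_PREP() is the inverse: it masks and shifts a value into place. */
static u32 my_encode_guar_cnt(u32 cnt)
{
	return FIELD_PREP(MY_FD_CNT_GCNT_NEW_M, cnt);
}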
*/ /* Machine-generated file */ @@ -231,6 +231,7 @@ #define PFINT_SB_CTL 0x0016B600 #define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_SB_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_TSYN_MSK 0x0016C980 #define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) #define QINT_RQCTL_MSIX_INDX_S 0 #define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0) @@ -284,11 +285,11 @@ #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0) -#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) -#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0) +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT(_i) (0x001E36E0 + ((_i) * 32)) +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 8 +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M GENMASK(15, 0) +#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR(_i) (0x001E3800 + ((_i) * 32)) +#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M GENMASK(15, 0) #define GL_MDCK_TX_TDPU 0x00049348 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDET_RX 0x00294C00 @@ -311,7 +312,11 @@ #define GL_MDET_TX_PQM_MAL_TYPE_S 26 #define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26) #define GL_MDET_TX_PQM_VALID_M BIT(31) -#define GL_MDET_TX_TCLAN 0x000FC068 +#define GL_MDET_TX_TCLAN_BY_MAC(hw) \ + ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MDET_TX_TCLAN : \ + E800_GL_MDET_TX_TCLAN) +#define E800_GL_MDET_TX_TCLAN 0x000FC068 +#define E830_GL_MDET_TX_TCLAN 0x000FCCC0 #define GL_MDET_TX_TCLAN_QNUM_S 0 #define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0) #define GL_MDET_TX_TCLAN_VF_NUM_S 15 @@ -325,7 +330,11 @@ #define PF_MDET_RX_VALID_M BIT(0) #define PF_MDET_TX_PQM 0x002D2C80 #define PF_MDET_TX_PQM_VALID_M BIT(0) -#define PF_MDET_TX_TCLAN 0x000FC000 +#define PF_MDET_TX_TCLAN_BY_MAC(hw) \ + ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PF_MDET_TX_TCLAN : \ + E800_PF_MDET_TX_TCLAN) +#define E800_PF_MDET_TX_TCLAN 0x000FC000 +#define E830_PF_MDET_TX_TCLAN 0x000FCC00 #define PF_MDET_TX_TCLAN_VALID_M BIT(0) #define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) #define VP_MDET_RX_VALID_M BIT(0) @@ -335,6 +344,8 @@ #define VP_MDET_TX_TCLAN_VALID_M BIT(0) #define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) #define VP_MDET_TX_TDPU_VALID_M BIT(0) +#define E800_GL_MNG_FWSM_FW_MODES_M GENMASK(2, 0) +#define E830_GL_MNG_FWSM_FW_MODES_M GENMASK(1, 0) #define GL_MNG_FWSM 0x000B6134 #define GL_MNG_FWSM_FW_LOADING_M BIT(30) #define GLNVM_FLA 0x000B6108 @@ -363,13 +374,18 @@ #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) #define GLQF_FD_CNT 0x00460018 +#define E800_GLQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) +#define E830_GLQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define GLQF_FD_CNT_FD_BCNT_S 16 -#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16) +#define E800_GLQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) +#define E830_GLQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define GLQF_FD_SIZE 0x00460010 #define GLQF_FD_SIZE_FD_GSIZE_S 0 -#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0) +#define E800_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(14, 0) +#define E830_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(15, 0) #define GLQF_FD_SIZE_FD_BSIZE_S 16 -#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) +#define E800_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(30, 16) +#define E830_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(31, 16) #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) #define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) #define GLQF_FDMASK_MAX_INDEX 31 @@ -388,6 +404,10 @@ #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) +#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) +#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) +#define E830_PFQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define PFQF_FD_ENA 0x0043A000 #define PFQF_FD_ENA_FD_ENA_M BIT(0) #define PFQF_FD_SIZE 0x00460100 @@ -478,6 +498,7 @@ #define GLTSYN_SYNC_DLAY 0x00088818 #define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4)) #define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) +#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4)) #define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) #define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) #define PFHH_SEM 0x000A4200 /* Reset Source: PFR */ @@ -486,9 +507,11 @@ #define PFTSYN_SEM_BUSY_M BIT(0) #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 -#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) +#define E800_VSIQF_FD_CNT_FD_GCNT_M GENMASK(13, 0) +#define E830_VSIQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define VSIQF_FD_CNT_FD_BCNT_S 16 -#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16) +#define E800_VSIQF_FD_CNT_FD_BCNT_M GENMASK(29, 16) +#define E830_VSIQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) #define VSIQF_HKEY_MAX_INDEX 12 #define PFPM_APM 0x000B8080 @@ -500,6 +523,10 @@ #define PFPM_WUS_MAG_M BIT(1) #define PFPM_WUS_MNG_M BIT(3) #define PFPM_WUS_FW_RST_WK_M BIT(31) +#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 +#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) +#define E830_PRTMAC_CL01_QNT_THR 0x001E3320 +#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 
23e197c3d0..b47cd43ae8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -19,8 +19,11 @@ static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0, static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = { 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + 0, 0, 0, 0, 0, 0, 0x30 }; +static const u8 ice_lport_rcp[ICE_RECIPE_LEN] = { + 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x85, 0, 0x16, 0, 0, 0, 0xff, 0xff, 0x07, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0x30 }; /** * ice_lag_set_primary - set PF LAG state as Primary @@ -173,18 +176,22 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) } /** - * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG + * ice_lag_cfg_fltr - Add/Remove rule for LAG * @lag: lag struct for local interface + * @act: rule action + * @recipe_id: recipe id for the new rule + * @rule_idx: pointer to rule index * @add: boolean on whether we are adding filters */ static int -ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) +ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, + bool add) { struct ice_sw_rule_lkup_rx_tx *s_rule; u16 s_rule_sz, vsi_num; struct ice_hw *hw; - u32 act, opc; u8 *eth_hdr; + u32 opc; int err; hw = &lag->pf->hw; @@ -193,7 +200,7 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule); s_rule = kzalloc(s_rule_sz, GFP_KERNEL); if (!s_rule) { - dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG default VSI\n"); + dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG\n"); return -ENOMEM; } @@ -201,19 +208,17 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) eth_hdr = s_rule->hdr_data; ice_fill_eth_hdr(eth_hdr); - act = (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) & + act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; - act |= ICE_SINGLE_ACT_VSI_FORWARDING | - ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); - s_rule->recipe_id = cpu_to_le16(lag->pf_recipe); + s_rule->recipe_id = cpu_to_le16(recipe_id); s_rule->src = cpu_to_le16(hw->port_info->lport); s_rule->act = cpu_to_le32(act); s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN); opc = ice_aqc_opc_add_sw_rules; } else { - s_rule->index = cpu_to_le16(lag->pf_rule_id); + s_rule->index = cpu_to_le16(*rule_idx); opc = ice_aqc_opc_remove_sw_rules; } @@ -222,15 +227,46 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) goto dflt_fltr_free; if (add) - lag->pf_rule_id = le16_to_cpu(s_rule->index); + *rule_idx = le16_to_cpu(s_rule->index); else - lag->pf_rule_id = 0; + *rule_idx = 0; dflt_fltr_free: kfree(s_rule); return err; } +/** + * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG + * @lag: lag struct for local interface + * @add: boolean on whether to add filter + */ +static int +ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) +{ + u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; + + return ice_lag_cfg_fltr(lag, act, lag->pf_recipe, + &lag->pf_rule_id, add); +} + +/** + * ice_lag_cfg_drop_fltr - Add/Remove lport drop rule + * @lag: lag struct for local interface + * @add: boolean on whether to add filter + */ +static int +ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add) +{ + u32 
act = ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT | + ICE_SINGLE_ACT_DROP; + + return ice_lag_cfg_fltr(lag, act, lag->lport_recipe, + &lag->lport_rule_idx, add); +} + /** * ice_lag_cfg_pf_fltrs - set filters up for new active port * @lag: local interfaces lag struct @@ -257,13 +293,18 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) if (bonding_info->slave.state && lag->pf_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, false)) dev_err(dev, "Error removing old default VSI filter\n"); + if (ice_lag_cfg_drop_fltr(lag, true)) + dev_err(dev, "Error adding new drop filter\n"); return; } /* interface becoming active - add new default VSI rule */ - if (!bonding_info->slave.state && !lag->pf_rule_id) + if (!bonding_info->slave.state && !lag->pf_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(dev, "Error adding new default VSI filter\n"); + if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false)) + dev_err(dev, "Error removing old drop filter\n"); + } } /** @@ -430,10 +471,11 @@ static void ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport, u16 vsi_num, u8 tc) { - u16 numq, valq, buf_size, num_moved, qbuf_size; + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); struct ice_aqc_cfg_txqs_buf *qbuf; - struct ice_aqc_move_elem *buf; struct ice_sched_node *n_prt; struct ice_hw *new_hw = NULL; __le32 teid, parent_teid; @@ -505,26 +547,17 @@ qbuf_none: goto resume_traffic; /* Move Vf's VSI node for this TC to newport's scheduler tree */ - buf_size = struct_size(buf, teid, 1); - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - dev_warn(dev, "Failure to alloc memory for VF node failover\n"); - goto resume_traffic; - } - buf->hdr.src_parent_teid = parent_teid; buf->hdr.dest_parent_teid = n_prt->info.node_teid; buf->hdr.num_elems = cpu_to_le16(1); buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; buf->teid[0] = teid; - if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved, - NULL)) + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) dev_warn(dev, "Failure to move VF nodes for failover\n"); else ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]); - kfree(buf); goto resume_traffic; qbuf_err: @@ -798,10 +831,11 @@ static void ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num, u8 tc) { - u16 numq, valq, buf_size, num_moved, qbuf_size; + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); struct ice_aqc_cfg_txqs_buf *qbuf; - struct ice_aqc_move_elem *buf; struct ice_sched_node *n_prt; __le32 teid, parent_teid; struct ice_vsi_ctx *ctx; @@ -863,26 +897,17 @@ reclaim_none: goto resume_reclaim; /* Move node to new parent */ - buf_size = struct_size(buf, teid, 1); - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - dev_warn(dev, "Failure to alloc memory for VF node failover\n"); - goto resume_reclaim; - } - buf->hdr.src_parent_teid = parent_teid; buf->hdr.dest_parent_teid = n_prt->info.node_teid; buf->hdr.num_elems = cpu_to_le16(1); buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; buf->teid[0] = teid; - if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved, - NULL)) + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n"); else ice_sched_update_parent(n_prt, 
ctx->sched.vsi_node[tc]); - kfree(buf); goto resume_reclaim; reclaim_qerr: @@ -1238,6 +1263,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); + ice_lag_cfg_drop_fltr(lag, true); } /* add filter for primary control packets */ ice_lag_cfg_cp_fltr(lag, true); @@ -1829,10 +1855,11 @@ static void ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw, u16 vsi_num, u8 tc) { - u16 numq, valq, buf_size, num_moved, qbuf_size; + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); struct ice_aqc_cfg_txqs_buf *qbuf; - struct ice_aqc_move_elem *buf; struct ice_sched_node *n_prt; __le32 teid, parent_teid; struct ice_vsi_ctx *ctx; @@ -1890,26 +1917,17 @@ sync_none: goto resume_sync; /* Move node to new parent */ - buf_size = struct_size(buf, teid, 1); - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) { - dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n"); - goto resume_sync; - } - buf->hdr.src_parent_teid = parent_teid; buf->hdr.dest_parent_teid = n_prt->info.node_teid; buf->hdr.num_elems = cpu_to_le16(1); buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; buf->teid[0] = teid; - if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved, - NULL)) + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n"); else ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]); - kfree(buf); goto resume_sync; sync_qerr: @@ -1992,11 +2010,16 @@ int ice_init_lag(struct ice_pf *pf) goto lag_error; } - err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, ice_dflt_vsi_rcp, - 1); + err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, + ice_dflt_vsi_rcp, 1); if (err) goto lag_error; + err = ice_create_lag_recipe(&pf->hw, &lag->lport_recipe, + ice_lport_rcp, 3); + if (err) + goto free_rcp_res; + /* associate recipes to profiles */ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) { err = ice_aq_get_recipe_to_profile(&pf->hw, n, @@ -2005,7 +2028,8 @@ int ice_init_lag(struct ice_pf *pf) continue; if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) { - recipe_bits |= BIT(lag->pf_recipe); + recipe_bits |= BIT(lag->pf_recipe) | + BIT(lag->lport_recipe); ice_aq_map_recipe_to_profile(&pf->hw, n, (u8 *)&recipe_bits, NULL); } @@ -2016,6 +2040,9 @@ int ice_init_lag(struct ice_pf *pf) dev_dbg(dev, "INIT LAG complete\n"); return 0; +free_rcp_res: + ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, + &pf->lag->pf_recipe); lag_error: kfree(lag); pf->lag = NULL; @@ -2045,6 +2072,8 @@ void ice_deinit_lag(struct ice_pf *pf) ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, &pf->lag->pf_recipe); + ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, + &pf->lag->lport_recipe); kfree(lag); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 7f22987675..ede833dfa6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -39,8 +39,10 @@ struct ice_lag { u8 bonded:1; /* currently bonded */ u8 primary:1; /* this is primary */ u16 pf_recipe; + u16 lport_recipe; u16 pf_rule_id; u16 cp_rule_idx; + u16 lport_rule_idx; u8 role; }; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a66c3b6cce..c01950de44 
100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -229,7 +229,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count */ - vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; + vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; break; case ICE_VSI_CTRL: vsi->alloc_txq = 1; @@ -979,7 +979,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) */ if (ice_is_dvm_ena(hw)) { ctxt->info.inner_vlan_flags |= - ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); ctxt->info.outer_vlan_flags = (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & @@ -1186,12 +1187,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; case ICE_VSI_VF: /* VF VSI will gets a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; default: dev_dbg(dev, "Unsupported VSI type %s\n", @@ -1831,21 +1832,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) { - struct ice_aqc_add_tx_qgrp *qg_buf; - int err; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) return -EINVAL; - qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - qg_buf->num_txqs = 1; - err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); - kfree(qg_buf); - return err; + return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); } /** @@ -1887,24 +1881,18 @@ setup_rings: static int ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) { - struct ice_aqc_add_tx_qgrp *qg_buf; - u16 q_idx = 0; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); int err = 0; - - qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; + u16 q_idx; qg_buf->num_txqs = 1; for (q_idx = 0; q_idx < count; q_idx++) { err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); if (err) - goto err_cfg_txqs; + break; } -err_cfg_txqs: - kfree(qg_buf); return err; } @@ -3989,13 +3977,21 @@ void ice_init_feature_support(struct ice_pf *pf) case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_BACKPLANE: + case ICE_DEV_ID_E810_XXV_QSFP: + case ICE_DEV_ID_E810_XXV_SFP: ice_set_feature_support(pf, ICE_F_DSCP); - ice_set_feature_support(pf, ICE_F_PTP_EXTTS); - if (ice_is_e810t(&pf->hw)) { + if (ice_is_phy_rclk_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_PHY_RCLK); + /* If we don't own the timer - don't enable other caps */ + if (!ice_pf_src_tmr_owned(pf)) + break; + if (ice_is_cgu_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_CGU); + if (ice_is_clock_mux_in_netlist(&pf->hw)) ice_set_feature_support(pf, ICE_F_SMA_CTRL); - if (ice_gnss_is_gps_present(&pf->hw)) - ice_set_feature_support(pf, ICE_F_GNSS); - } + if (ice_gnss_is_gps_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_GNSS); break; default: break; diff --git 
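The ice_lag.c and ice_lib.c hunks above convert one-element flexible-array buffers from kzalloc(struct_size(...)) heap allocations to the on-stack DEFINE_FLEX() helper, which removes the allocation-failure error paths. A minimal before/after sketch follows, using a hypothetical struct my_elem.

/* Hypothetical command buffer with a one-element flexible array. */
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_elem {
	u16 num;
	__le32 teid[];		/* flexible array member */
};

/* Before: heap allocation sized for one trailing element. */
static int my_old_way(void)
{
	struct my_elem *buf;

	buf = kzalloc(struct_size(buf, teid, 1), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf->num = 1;
	/* ... use buf ... */
	kfree(buf);
	return 0;
}

/* After: DEFINE_FLEX() reserves zeroed stack storage for the struct plus
 * one trailing element, so no allocation (and no -ENOMEM path) is needed.
 */
static int my_new_way(void)
{
	DEFINE_FLEX(struct my_elem, buf, teid, 1);
	u16 buf_size = __struct_size(buf);	/* size of the whole on-stack object */

	buf->num = 1;
	/* ... hand buf and buf_size to the firmware command as before ... */
	return buf_size ? 0 : -EINVAL;
}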
a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d8d2aa4c02..adfdea1e28 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. */ /* Intel(R) Ethernet Connection E800 Series Linux Driver */ @@ -1759,7 +1759,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) wr32(hw, GL_MDET_TX_PQM, 0xffffffff); } - reg = rd32(hw, GL_MDET_TX_TCLAN); + reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & GL_MDET_TX_TCLAN_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; @@ -1773,7 +1773,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); - wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); } reg = rd32(hw, GL_MDET_RX); @@ -1801,9 +1801,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); } - reg = rd32(hw, PF_MDET_TX_TCLAN); + reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & PF_MDET_TX_TCLAN_VALID_M) { - wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); } @@ -3150,7 +3150,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing) + if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); } @@ -3160,7 +3160,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; - if (hw->func_caps.ts_func_info.src_tmr_owned) { + if (ice_pf_src_tmr_owned(pf)) { /* Save EVENTs from GLTSYN register */ pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | @@ -3871,7 +3871,8 @@ static void ice_set_pf_caps(struct ice_pf *pf) } clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); - if (func_caps->common_cap.ieee_1588) + if (func_caps->common_cap.ieee_1588 && + !(pf->hw.mac_type == ICE_MAC_E830)) set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; @@ -4666,6 +4667,10 @@ static void ice_init_features(struct ice_pf *pf) if (ice_is_feature_supported(pf, ICE_F_GNSS)) ice_gnss_init(pf); + if (ice_is_feature_supported(pf, ICE_F_CGU) || + ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) + ice_dpll_init(pf); + /* Note: Flow director init failure is non-fatal to load */ if (ice_init_fdir(pf)) dev_err(dev, "could not initialize flow director\n"); @@ -4695,6 +4700,8 @@ static void ice_deinit_features(struct ice_pf *pf) ice_gnss_exit(pf); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_release(pf); + if (test_bit(ICE_FLAG_DPLL, pf->flags)) + ice_dpll_deinit(pf); } static void ice_init_wakeup(struct ice_pf *pf) @@ -5535,7 +5542,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } - ice_restore_all_vfs_msi_state(pdev); + ice_restore_all_vfs_msi_state(pf); ice_do_reset(pf, ICE_RESET_PFR); ice_service_task_restart(pf); @@ -5578,34 +5585,38 @@ static void ice_pci_err_reset_done(struct pci_dev *pdev) * Class, Class Mask, private data (not used) } */ static const struct pci_device_id 
ice_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, /* required last entry */ - { 0, } + {} }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); @@ -5629,6 +5640,8 @@ static struct pci_driver ice_driver = { #endif /* CONFIG_PM */ .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, + .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, + .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, .err_handler = &ice_pci_err_handler }; @@ -5645,6 +5658,8 @@ static int __init ice_module_init(void) pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); + ice_adv_lnk_speed_maps_init(); + ice_wq = alloc_workqueue("%s", 
0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); @@ -7386,10 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - /* configure PTP timestamping after VSI rebuild */ - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_cfg_timestamp(pf, false); - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); @@ -7441,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); + + /* Restore timestamp mode settings after VSI rebuild */ + ice_ptp_restore_timestamp_mode(pf); return; err_vsi_rebuild: diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index c4270708a7..e6b1ce76ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -39,8 +39,8 @@ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) /* initialize with defaults */ for (i = 0; i < NUM_PTP_PINS_E810T; i++) { - snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), - "%s", ice_pin_desc_e810t[i].name); + strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name, + sizeof(ptp_pins[i].name)); ptp_pins[i].index = ice_pin_desc_e810t[i].index; ptp_pins[i].func = ice_pin_desc_e810t[i].func; ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; @@ -256,36 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, } /** - * ice_set_tx_tstamp - Enable or disable Tx timestamping - * @pf: The PF pointer to search in - * @on: bool value for whether timestamps are enabled or disabled + * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device + * @pf: Board private structure + * + * Program the device to respond appropriately to the Tx timestamp interrupt + * cause. */ -static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) +static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) { - struct ice_vsi *vsi; + struct ice_hw *hw = &pf->hw; + bool enable; u32 val; - u16 i; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return; - /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_txq(vsi, i) { - if (!vsi->tx_rings[i]) - continue; - vsi->tx_rings[i]->ptp_tx = on; + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_ALL: + /* React to interrupts across all quads. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); + enable = true; + break; + case ICE_PTP_TX_INTERRUPT_NONE: + /* Do not react to interrupts on any quad. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); + enable = false; + break; + case ICE_PTP_TX_INTERRUPT_SELF: + default: + enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; + break; } /* Configure the Tx timestamp interrupt */ - val = rd32(&pf->hw, PFINT_OICR_ENA); - if (on) + val = rd32(hw, PFINT_OICR_ENA); + if (enable) val |= PFINT_OICR_TSYN_TX_M; else val &= ~PFINT_OICR_TSYN_TX_M; - wr32(&pf->hw, PFINT_OICR_ENA, val); - - pf->ptp.tstamp_config.tx_type = on ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + wr32(hw, PFINT_OICR_ENA, val); } /** @@ -299,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) u16 i; vsi = ice_get_main_vsi(pf); - if (!vsi) + if (!vsi || !vsi->rx_rings) return; /* Set the timestamp flag for all the Rx rings */ @@ -308,148 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) continue; vsi->rx_rings[i]->ptp_rx = on; } - - pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : - HWTSTAMP_FILTER_NONE; } /** - * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit + * ice_ptp_disable_timestamp_mode - Disable current timestamp mode * @pf: Board private structure - * @ena: bool value to enable or disable time stamp - * - * This function will configure timestamping during PTP initialization - * and deinitialization - */ -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) -{ - ice_set_tx_tstamp(pf, ena); - ice_set_rx_tstamp(pf, ena); -} - -/** - * ice_get_ptp_clock_index - Get the PTP clock index - * @pf: the PF pointer * - * Determine the clock index of the PTP clock associated with this device. If - * this is the PF controlling the clock, just use the local access to the - * clock device pointer. - * - * Otherwise, read from the driver shared parameters to determine the clock - * index value. - * - * Returns: the index of the PTP clock associated with this device, or -1 if - * there is no associated clock. + * Called during preparation for reset to temporarily disable timestamping on + * the device. Called during remove to disable timestamping while cleaning up + * driver resources. */ -int ice_get_ptp_clock_index(struct ice_pf *pf) +static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - u32 value; - int err; - - /* Use the ptp_clock structure if we're the main PF */ - if (pf->ptp.clock) - return ptp_clock_index(pf->ptp.clock); - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; - - err = ice_aq_get_driver_param(hw, param_idx, &value, NULL); - if (err) { - dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - return -1; - } - - /* The PTP clock index is an integer, and will be between 0 and - * INT_MAX. The highest bit of the driver shared parameter is used to - * indicate whether or not the currently stored clock index is valid. - */ - if (!(value & PTP_SHARED_CLK_IDX_VALID)) - return -1; - - return value & ~PTP_SHARED_CLK_IDX_VALID; -} - -/** - * ice_set_ptp_clock_index - Set the PTP clock index - * @pf: the PF pointer - * - * Set the PTP clock index for this device into the shared driver parameters, - * so that other PFs associated with this device can read it. - * - * If the PF is unable to store the clock index, it will log an error, but - * will continue operating PTP. 
- */ -static void ice_set_ptp_clock_index(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; - struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - u32 value; - int err; - - if (!pf->ptp.clock) - return; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; + u32 val; - value = (u32)ptp_clock_index(pf->ptp.clock); - if (value > INT_MAX) { - dev_err(dev, "PTP Clock index is too large to store\n"); - return; - } - value |= PTP_SHARED_CLK_IDX_VALID; + val = rd32(hw, PFINT_OICR_ENA); + val &= ~PFINT_OICR_TSYN_TX_M; + wr32(hw, PFINT_OICR_ENA, val); - err = ice_aq_set_driver_param(hw, param_idx, value, NULL); - if (err) { - dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - } + ice_set_rx_tstamp(pf, false); } /** - * ice_clear_ptp_clock_index - Clear the PTP clock index - * @pf: the PF pointer + * ice_ptp_restore_timestamp_mode - Restore timestamp configuration + * @pf: Board private structure * - * Clear the PTP clock index for this device. Must be called when - * unregistering the PTP clock, in order to ensure other PFs stop reporting - * a clock object that no longer exists. + * Called at the end of rebuild to restore timestamp configuration after + * a device reset. */ -static void ice_clear_ptp_clock_index(struct ice_pf *pf) +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - int err; + bool enable_rx; - /* Do not clear the index if we don't own the timer */ - if (!hw->func_caps.ts_func_info.src_tmr_owned) - return; + ice_ptp_cfg_tx_interrupt(pf); - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; + enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; + ice_set_rx_tstamp(pf, enable_rx); - err = ice_aq_set_driver_param(hw, param_idx, 0, NULL); - if (err) { - dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - } + /* Trigger an immediate software interrupt to ensure that timestamps + * which occurred during reset are handled now. 
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); } /** @@ -674,9 +582,6 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) int err; u8 idx; - if (!tx->init) - return; - ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); hw = &pf->hw; @@ -774,6 +679,39 @@ skip_ts_read: } } +/** + * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device + * @pf: Board private structure + */ +static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) +{ + struct ice_ptp_port *port; + unsigned int i; + + mutex_lock(&pf->ptp.ports_owner.lock); + list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { + struct ice_ptp_tx *tx = &port->tx; + + if (!tx || !tx->init) + continue; + + ice_ptp_process_tx_tstamp(tx); + } + mutex_unlock(&pf->ptp.ports_owner.lock); + + for (i = 0; i < ICE_MAX_QUAD; i++) { + u64 tstamp_ready; + int err; + + /* Read the Tx ready status first */ + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (err || tstamp_ready) + return ICE_TX_TSTAMP_WORK_PENDING; + } + + return ICE_TX_TSTAMP_WORK_DONE; +} + /** * ice_ptp_tx_tstamp - Process Tx timestamps for this function. * @tx: Tx tracking structure to initialize @@ -1366,6 +1304,7 @@ out_unlock: void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) { struct ice_ptp_port *ptp_port; + struct ice_hw *hw = &pf->hw; if (!test_bit(ICE_FLAG_PTP, pf->flags)) return; @@ -1380,11 +1319,16 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; - /* E810 devices do not need to reconfigure the PHY */ - if (ice_is_e810(&pf->hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: + /* Do not reconfigure E810 PHY */ return; - - ice_ptp_port_phy_restart(ptp_port); + case ICE_PHY_E822: + ice_ptp_port_phy_restart(ptp_port); + return; + default: + dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); + } } /** @@ -1440,6 +1384,24 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) ice_ptp_port_phy_restart(&pf->ptp.port); } +/** + * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping + * @pf: Board private structure + */ +static void ice_ptp_restart_all_phy(struct ice_pf *pf) +{ + struct list_head *entry; + + list_for_each(entry, &pf->ptp.ports_owner.ports) { + struct ice_ptp_port *port = list_entry(entry, + struct ice_ptp_port, + list_member); + + if (port->link_up) + ice_ptp_port_phy_restart(port); + } +} + /** * ice_ptp_adjfine - Adjust clock increment rate * @info: the driver's PTP info structure @@ -1877,9 +1839,9 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* Reenable periodic outputs */ ice_ptp_enable_all_clkout(pf); - /* Recalibrate and re-enable timestamp block */ - if (pf->ptp.port.link_up) - ice_ptp_port_phy_restart(&pf->ptp.port); + /* Recalibrate and re-enable timestamp blocks for E822/E823 */ + if (hw->phy_model == ICE_PHY_E822) + ice_ptp_restart_all_phy(pf); exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -1976,21 +1938,32 @@ ice_ptp_get_syncdevicetime(ktime_t *device, u32 hh_lock, hh_art_ctl; int i; - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); +#define MAX_HH_HW_LOCK_TRIES 5 +#define MAX_HH_CTL_LOCK_TRIES 100 + + for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { + /* Get the HW lock */ + hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); + if (hh_lock & 
PFHH_SEM_BUSY_M) { + usleep_range(10000, 15000); + continue; + } + break; + } if (hh_lock & PFHH_SEM_BUSY_M) { dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); - return -EFAULT; + return -EBUSY; } + /* Program cmd to master timer */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + /* Start the ART and device clock sync sequence */ hh_art_ctl = rd32(hw, GLHH_ART_CTL); hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; wr32(hw, GLHH_ART_CTL, hh_art_ctl); -#define MAX_HH_LOCK_TRIES 100 - - for (i = 0; i < MAX_HH_LOCK_TRIES; i++) { + for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { /* Wait for sync to complete */ hh_art_ctl = rd32(hw, GLHH_ART_CTL); if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { @@ -2014,19 +1987,23 @@ ice_ptp_get_syncdevicetime(ktime_t *device, break; } } + + /* Clear the master timer */ + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + /* Release HW lock */ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); - if (i == MAX_HH_LOCK_TRIES) + if (i == MAX_HH_CTL_LOCK_TRIES) return -ETIMEDOUT; return 0; } /** - * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp + * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2034,14 +2011,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * clock. Fill the cross timestamp information and report it back to the * caller. * - * This is only valid for E822 devices which have support for generating the - * cross timestamp via PCIe PTM. + * This is only valid for E822 and E823 devices which have support for + * generating the cross timestamp via PCIe PTM. * * In order to correctly correlate the ART timestamp back to the TSC time, the * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 
*/ static int -ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info, +ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); @@ -2081,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { switch (config->tx_type) { case HWTSTAMP_TX_OFF: - ice_set_tx_tstamp(pf, false); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ON: - ice_set_tx_tstamp(pf, true); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; break; default: return -ERANGE; @@ -2092,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - ice_set_rx_tstamp(pf, false); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: @@ -2108,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - ice_set_rx_tstamp(pf, true); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } + /* Immediately update the device timestamping mode */ + ice_ptp_restore_timestamp_mode(pf); + return 0; } @@ -2277,22 +2257,22 @@ ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) } /** - * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support + * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support * @pf: Board private structure * @info: PTP info to fill * - * Assign functions to the PTP capabiltiies structure for E822 devices. + * Assign functions to the PTP capabiltiies structure for E82x devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for E822 + * in ice_ptp_set_caps. Only add functions here which are distinct for E82x * devices. */ static void -ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info) +ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) { #ifdef CONFIG_ICE_HWTS if (boot_cpu_has(X86_FEATURE_ART) && boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - info->getcrosststamp = ice_ptp_getcrosststamp_e822; + info->getcrosststamp = ice_ptp_getcrosststamp_e82x; #endif /* CONFIG_ICE_HWTS */ } @@ -2326,6 +2306,8 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) static void ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) { + ice_ptp_set_funcs_e82x(pf, info); + info->enable = ice_ptp_gpio_enable_e823; ice_ptp_setup_pins_e823(pf, info); } @@ -2353,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf) else if (ice_is_e823(&pf->hw)) ice_ptp_set_funcs_e823(pf, info); else - ice_ptp_set_funcs_e822(pf, info); + ice_ptp_set_funcs_e82x(pf, info); } /** @@ -2368,7 +2350,6 @@ static void ice_ptp_set_caps(struct ice_pf *pf) static long ice_ptp_create_clock(struct ice_pf *pf) { struct ptp_clock_info *info; - struct ptp_clock *clock; struct device *dev; /* No need to create a clock device if we already have one */ @@ -2381,11 +2362,11 @@ static long ice_ptp_create_clock(struct ice_pf *pf) dev = ice_pf_to_dev(pf); /* Attempt to register the clock before enabling the hardware. 
*/ - clock = ptp_clock_register(info, dev); - if (IS_ERR(clock)) - return PTR_ERR(clock); - - pf->ptp.clock = clock; + pf->ptp.clock = ptp_clock_register(info, dev); + if (IS_ERR(pf->ptp.clock)) { + dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); + return PTR_ERR(pf->ptp.clock); + } return 0; } @@ -2442,7 +2423,21 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) */ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) { - return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_NONE: + /* This device has the clock owner handle timestamps for it */ + return ICE_TX_TSTAMP_WORK_DONE; + case ICE_PTP_TX_INTERRUPT_SELF: + /* This device handles its own timestamps */ + return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + case ICE_PTP_TX_INTERRUPT_ALL: + /* This device handles timestamps for all ports */ + return ice_ptp_tx_tstamp_owner(pf); + default: + WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", + pf->ptp.tx_interrupt_mode); + return ICE_TX_TSTAMP_WORK_DONE; + } } static void ice_ptp_periodic_work(struct kthread_work *work) @@ -2476,7 +2471,7 @@ void ice_ptp_reset(struct ice_pf *pf) if (test_bit(ICE_PFR_REQ, pf->state)) goto pfr; - if (!hw->func_caps.ts_func_info.src_tmr_owned) + if (!ice_pf_src_tmr_owned(pf)) goto reset_ts; err = ice_ptp_init_phc(hw); @@ -2551,6 +2546,211 @@ err: dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } +/** + * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device + * @aux_dev: auxiliary device to get the auxiliary PF for + */ +static struct ice_pf * +ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) +{ + struct ice_ptp_port *aux_port; + struct ice_ptp *aux_ptp; + + aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); + aux_ptp = container_of(aux_port, struct ice_ptp, port); + + return container_of(aux_ptp, struct ice_pf, ptp); +} + +/** + * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device + * @aux_dev: auxiliary device to get the PF for + */ +static struct ice_pf * +ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) +{ + struct ice_ptp_port_owner *ports_owner; + struct auxiliary_driver *aux_drv; + struct ice_ptp *owner_ptp; + + if (!aux_dev->dev.driver) + return NULL; + + aux_drv = to_auxiliary_drv(aux_dev->dev.driver); + ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, + aux_driver); + owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); + return container_of(owner_ptp, struct ice_pf, ptp); +} + +/** + * ice_ptp_auxbus_probe - Probe auxiliary devices + * @aux_dev: PF's auxiliary device + * @id: Auxiliary device ID + */ +static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *id) +{ + struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + + if (WARN_ON(!owner_pf)) + return -ENODEV; + + INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); + mutex_lock(&owner_pf->ptp.ports_owner.lock); + list_add(&aux_pf->ptp.port.list_member, + &owner_pf->ptp.ports_owner.ports); + mutex_unlock(&owner_pf->ptp.ports_owner.lock); + + return 0; +} + +/** + * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus + * @aux_dev: PF's auxiliary device + */ +static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) +{ + struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + 
+ mutex_lock(&owner_pf->ptp.ports_owner.lock); + list_del(&aux_pf->ptp.port.list_member); + mutex_unlock(&owner_pf->ptp.ports_owner.lock); +} + +/** + * ice_ptp_auxbus_shutdown + * @aux_dev: PF's auxiliary device + */ +static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ +} + +/** + * ice_ptp_auxbus_suspend + * @aux_dev: PF's auxiliary device + * @state: power management state indicator + */ +static int +ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ + return 0; +} + +/** + * ice_ptp_auxbus_resume + * @aux_dev: PF's auxiliary device + */ +static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ + return 0; +} + +/** + * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table + * @pf: Board private structure + * @name: auxiliary bus driver name + */ +static struct auxiliary_device_id * +ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) +{ + struct auxiliary_device_id *ids; + + /* Second id left empty to terminate the array */ + ids = devm_kcalloc(ice_pf_to_dev(pf), 2, + sizeof(struct auxiliary_device_id), GFP_KERNEL); + if (!ids) + return NULL; + + snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); + + return ids; +} + +/** + * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver + * @pf: Board private structure + */ +static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) +{ + struct auxiliary_driver *aux_driver; + struct ice_ptp *ptp; + struct device *dev; + char *name; + int err; + + ptp = &pf->ptp; + dev = ice_pf_to_dev(pf); + aux_driver = &ptp->ports_owner.aux_driver; + INIT_LIST_HEAD(&ptp->ports_owner.ports); + mutex_init(&ptp->ports_owner.lock); + name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", + pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), + ice_get_ptp_src_clock_index(&pf->hw)); + if (!name) + return -ENOMEM; + + aux_driver->name = name; + aux_driver->shutdown = ice_ptp_auxbus_shutdown; + aux_driver->suspend = ice_ptp_auxbus_suspend; + aux_driver->remove = ice_ptp_auxbus_remove; + aux_driver->resume = ice_ptp_auxbus_resume; + aux_driver->probe = ice_ptp_auxbus_probe; + aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); + if (!aux_driver->id_table) + return -ENOMEM; + + err = auxiliary_driver_register(aux_driver); + if (err) { + devm_kfree(dev, aux_driver->id_table); + dev_err(dev, "Failed registering aux_driver, name <%s>\n", + name); + } + + return err; +} + +/** + * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver + * @pf: Board private structure + */ +static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) +{ + struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; + + auxiliary_driver_unregister(aux_driver); + devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); + + mutex_destroy(&pf->ptp.ports_owner.lock); +} + +/** + * ice_ptp_clock_index - Get the PTP clock index for this device + * @pf: Board private structure + * + * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock + * is associated. 
+ */ +int ice_ptp_clock_index(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev; + struct ice_pf *owner_pf; + struct ptp_clock *clock; + + aux_dev = &pf->ptp.port.aux_dev; + owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + if (!owner_pf) + return -1; + clock = owner_pf->ptp.clock; + + return clock ? ptp_clock_index(clock) : -1; +} + /** * ice_ptp_prepare_for_reset - Prepare PTP for reset * @pf: Board private structure @@ -2563,7 +2763,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf) clear_bit(ICE_FLAG_PTP, pf->flags); /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); kthread_cancel_delayed_work_sync(&ptp->work); @@ -2641,11 +2841,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf) if (err) goto err_clk; - /* Store the PTP clock index for other PFs */ - ice_set_ptp_clock_index(pf); + err = ice_ptp_register_auxbus_driver(pf); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); + goto err_aux; + } return 0; - +err_aux: + ptp_clock_unregister(pf->ptp.clock); err_clk: pf->ptp.clock = NULL; err_exit: @@ -2687,14 +2891,119 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) */ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) { + struct ice_hw *hw = &pf->hw; + mutex_init(&ptp_port->ps_lock); - if (ice_is_e810(&pf->hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); + case ICE_PHY_E822: + kthread_init_delayed_work(&ptp_port->ov_work, + ice_ptp_wait_for_offsets); + + return ice_ptp_init_tx_e822(pf, &ptp_port->tx, + ptp_port->port_num); + default: + return -ENODEV; + } +} - kthread_init_delayed_work(&ptp_port->ov_work, - ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num); +/** + * ice_ptp_release_auxbus_device + * @dev: device that utilizes the auxbus + */ +static void ice_ptp_release_auxbus_device(struct device *dev) +{ + /* Doing nothing here, but handle to auxbux device must be satisfied */ +} + +/** + * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device + * @pf: Board private structure + */ +static int ice_ptp_create_auxbus_device(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev; + struct ice_ptp *ptp; + struct device *dev; + char *name; + int err; + u32 id; + + ptp = &pf->ptp; + id = ptp->port.port_num; + dev = ice_pf_to_dev(pf); + + aux_dev = &ptp->port.aux_dev; + + name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", + pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), + ice_get_ptp_src_clock_index(&pf->hw)); + if (!name) + return -ENOMEM; + + aux_dev->name = name; + aux_dev->id = id; + aux_dev->dev.release = ice_ptp_release_auxbus_device; + aux_dev->dev.parent = dev; + + err = auxiliary_device_init(aux_dev); + if (err) + goto aux_err; + + err = auxiliary_device_add(aux_dev); + if (err) { + auxiliary_device_uninit(aux_dev); + goto aux_err; + } + + return 0; +aux_err: + dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); + devm_kfree(dev, name); + return err; +} + +/** + * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device + * @pf: Board private structure + */ +static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; + + auxiliary_device_delete(aux_dev); + auxiliary_device_uninit(aux_dev); + + memset(aux_dev, 0, sizeof(*aux_dev)); +} + +/** + * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt 
mode + * @pf: Board private structure + * + * Initialize the Tx timestamp interrupt mode for this device. For most device + * types, each PF processes the interrupt and manages its own timestamps. For + * E822-based devices, only the clock owner processes the timestamps. Other + * PFs disable the interrupt and do not process their own timestamps. + */ +static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) +{ + switch (pf->hw.phy_model) { + case ICE_PHY_E822: + /* E822 based PHY has the clock owner process the interrupt + * for all ports. + */ + if (ice_pf_src_tmr_owned(pf)) + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; + else + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; + break; + default: + /* other PHY types handle their own Tx interrupt */ + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; + } } /** @@ -2715,10 +3024,14 @@ void ice_ptp_init(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; int err; + ice_ptp_init_phy_model(hw); + + ice_ptp_init_tx_interrupt_mode(pf); + /* If this function owns the clock hardware, it must allocate and * configure the PTP clock device to represent it. */ - if (hw->func_caps.ts_func_info.src_tmr_owned) { + if (ice_pf_src_tmr_owned(pf)) { err = ice_ptp_init_owner(pf); if (err) goto err; @@ -2732,11 +3045,18 @@ void ice_ptp_init(struct ice_pf *pf) /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); + /* Configure initial Tx interrupt settings */ + ice_ptp_cfg_tx_interrupt(pf); + set_bit(ICE_FLAG_PTP, pf->flags); err = ice_ptp_init_work(pf, ptp); if (err) goto err; + err = ice_ptp_create_auxbus_device(pf); + if (err) + goto err; + dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; @@ -2763,7 +3083,9 @@ void ice_ptp_release(struct ice_pf *pf) return; /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); + + ice_ptp_remove_auxbus_device(pf); ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); @@ -2784,9 +3106,10 @@ void ice_ptp_release(struct ice_pf *pf) /* Disable periodic outputs */ ice_ptp_disable_all_clkout(pf); - ice_clear_ptp_clock_index(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; + ice_ptp_unregister_auxbus_driver(pf); + dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 995a57019b..06a330867f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -157,7 +157,9 @@ struct ice_ptp_tx { * ready for PTP functionality. It is used to track the port initialization * and determine when the port's PHY offset is valid. 
* + * @list_member: list member structure of auxiliary device * @tx: Tx timestamp tracking for this port + * @aux_dev: auxiliary device associated with this port * @ov_work: delayed work task for tracking when PHY offset is valid * @ps_lock: mutex used to protect the overall PTP PHY start procedure * @link_up: indicates whether the link is up @@ -165,7 +167,9 @@ struct ice_ptp_tx { * @port_num: the port number this structure represents */ struct ice_ptp_port { + struct list_head list_member; struct ice_ptp_tx tx; + struct auxiliary_device aux_dev; struct kthread_delayed_work ov_work; struct mutex ps_lock; /* protects overall PTP PHY start procedure */ bool link_up; @@ -173,11 +177,35 @@ struct ice_ptp_port { u8 port_num; }; +enum ice_ptp_tx_interrupt { + ICE_PTP_TX_INTERRUPT_NONE = 0, + ICE_PTP_TX_INTERRUPT_SELF, + ICE_PTP_TX_INTERRUPT_ALL, +}; + +/** + * struct ice_ptp_port_owner - data used to handle the PTP clock owner info + * + * This structure contains data necessary for the PTP clock owner to correctly + * handle the timestamping feature for all attached ports. + * + * @aux_driver: the structure carring the auxiliary driver information + * @ports: list of porst handled by this port owner + * @lock: protect access to ports list + */ +struct ice_ptp_port_owner { + struct auxiliary_driver aux_driver; + struct list_head ports; + struct mutex lock; +}; + #define GLTSYN_TGT_H_IDX_MAX 4 /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK + * @tx_interrupt_mode: the TX interrupt mode for the PTP clock * @port: data for the PHY port initialization procedure + * @ports_owner: data for the auxiliary driver owner * @work: delayed work function for periodic tasks * @cached_phc_time: a cached copy of the PHC time for timestamp extension * @cached_phc_jiffies: jiffies when cached_phc_time was last updated @@ -197,7 +225,9 @@ struct ice_ptp_port { * @late_cached_phc_updates: number of times cached PHC update is late */ struct ice_ptp { + enum ice_ptp_tx_interrupt tx_interrupt_mode; struct ice_ptp_port port; + struct ice_ptp_port_owner ports_owner; struct kthread_delayed_work work; u64 cached_phc_time; unsigned long cached_phc_jiffies; @@ -258,11 +288,11 @@ struct ice_ptp { #define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int ice_ptp_clock_index(struct ice_pf *pf); struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); -int ice_get_ptp_clock_index(struct ice_pf *pf); +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); @@ -287,12 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) return -EOPNOTSUPP; } -static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } -static inline int ice_get_ptp_clock_index(struct ice_pf *pf) -{ - return -1; -} - +static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { } static inline void ice_ptp_extts_event(struct ice_pf *pf) { } static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) @@ -314,5 +339,10 @@ static inline void ice_ptp_release(struct ice_pf *pf) { } static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) { } + +static inline int ice_ptp_clock_index(struct ice_pf *pf) +{ + return -1; +} #endif /* 
IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ #endif /* _ICE_PTP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index f818dd215c..a00b55e14a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -7,6 +7,132 @@ #include "ice_ptp_consts.h" #include "ice_cgu_regs.h" +static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = { + DPLL_PIN_FREQUENCY_1PPS, + DPLL_PIN_FREQUENCY_10MHZ, +}; + +static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = { + DPLL_PIN_FREQUENCY_1PPS, +}; + +static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = { + DPLL_PIN_FREQUENCY_10MHZ, +}; + +static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = { + { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, }, + { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, }, + { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { + { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, }, + { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, }, + { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, }, + { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, }, + { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = { + { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, + { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, + { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = { + { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "CVL-SDP21", ZL_OUT5, 
DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, +}; + +static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = { + { "NONE", SI_REF0P, 0, 0 }, + { "NONE", SI_REF0N, 0, 0 }, + { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 }, + { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 }, + { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", SI_REF2N, 0, 0 }, + { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, +}; + +static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = { + { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, + { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, +}; + +static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = { + { "NONE", ZL_REF0P, 0, 0 }, + { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 }, + { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 }, + { "NONE", ZL_REF2P, 0, 0 }, + { "NONE", ZL_REF2N, 0, 0 }, + { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", ZL_REF3N, 0, 0 }, + { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 }, +}; + +static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = { + { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", ZL_OUT5, 0, 0 }, +}; + /* Low level functions for interacting with and managing the device clock used * for the Precision Time Protocol. * @@ -107,7 +233,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) * * Prepare the source timer for an upcoming timer sync command. 
*/ -static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u32 cmd_val; u8 tmr_idx; @@ -116,19 +242,19 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) cmd_val = tmr_idx << SEL_CPK_SRC; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val |= GLTSYN_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val |= GLTSYN_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val |= GLTSYN_CMD_ADJ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val |= GLTSYN_CMD_READ_TIME; break; case ICE_PTP_NOP: @@ -168,9 +294,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY; - phy = port / ICE_PORTS_PER_PHY; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE; + phy_port = port % ICE_PORTS_PER_PHY_E822; + phy = port / ICE_PORTS_PER_PHY_E822; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -495,20 +621,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. */ -static void +static int ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; + if (quad >= ICE_MAX_QUAD) + return -EINVAL; + msg->dest_dev = rmn_0; - if ((quad % ICE_NUM_QUAD_TYPE) == 0) + if ((quad % ICE_QUADS_PER_PHY_E822) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; msg->msg_addr_low = lower_16_bits(addr); msg->msg_addr_high = upper_16_bits(addr); + + return 0; } /** @@ -527,10 +658,10 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - if (quad >= ICE_MAX_QUAD) - return -EINVAL; + err = ice_fill_quad_msg_e822(&msg, quad, offset); + if (err) + return err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_rd; err = ice_sbq_rw_reg(hw, &msg); @@ -561,10 +692,10 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - if (quad >= ICE_MAX_QUAD) - return -EINVAL; + err = ice_fill_quad_msg_e822(&msg, quad, offset); + if (err) + return err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; @@ -628,29 +759,32 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * @quad: the quad to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the PHY quad block that is - * shared between the internal PHYs on the E822 devices. + * Read the timestamp out of the quad to clear its timestamp status bit from + * the PHY quad block that is shared between the internal PHYs of the E822 + * devices. + * + * Note that unlike E810, software cannot directly write to the quad memory + * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function + * to determine which timestamps are valid. Reading a timestamp auto-clears + * the valid bit. + * + * To directly clear the contents of the timestamp block entirely, discarding + * all timestamp data at once, software should instead use + * ice_ptp_reset_ts_memory_quad_e822(). 
+ * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). */ static int ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) { - u16 lo_addr, hi_addr; + u64 unused_tstamp; int err; - lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); - hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - - err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0); + err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", - err); - return err; - } - - err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", + quad, idx, err); return err; } @@ -1025,7 +1159,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) * @time: Time to initialize the PHY port clocks to * * Program the PHY port registers with a new initial time value. The port - * clock will be initialized once the driver issues an INIT_TIME sync + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync * command. The time value is the upper 32 bits of the PHY timer, usually in * units of nominal nanoseconds. */ @@ -1074,7 +1208,7 @@ exit_err: * * Program the port for an atomic adjustment by writing the Tx and Rx timer * registers. The atomic adjustment won't be completed until the driver issues - * an ADJ_TIME command. + * an ICE_PTP_ADJ_TIME command. * * Note that time is not in units of nanoseconds. It is in clock time * including the lower sub-nanosecond portion of the port timer. @@ -1127,7 +1261,7 @@ exit_err: * * Prepare the PHY ports for an atomic time adjustment by programming the PHY * Tx and Rx port registers. The actual adjustment is completed by issuing an - * ADJ_TIME or ADJ_TIME_AT_TIME sync command. + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ static int ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) @@ -1162,7 +1296,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) * * Prepare each of the PHY ports for a new increment value by programming the * port's TIMETUS registers. The new increment value will be updated after - * issuing an INIT_INCVAL command. + * issuing an ICE_PTP_INIT_INCVAL command. */ static int ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) @@ -1248,19 +1382,19 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd tmr_idx = ice_get_ptp_src_clock_index(hw); cmd_val = tmr_idx << SEL_PHY_SRC; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val |= PHY_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val |= PHY_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val |= PHY_CMD_ADJ_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val |= PHY_CMD_READ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; break; case ICE_PTP_NOP: @@ -2196,8 +2330,8 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * @phy_time: on return, the 64bit PHY timer value * @phc_time: on return, the lower 64bits of PHC time * - * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC - * timer values. + * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. 
*/ static int ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, @@ -2210,15 +2344,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, tmr_idx = ice_get_ptp_src_clock_index(hw); - /* Prepare the PHC timer for a READ_TIME capture command */ - ice_ptp_src_cmd(hw, READ_TIME); + /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); - /* Prepare the PHY timer for a READ_TIME capture command */ - err = ice_ptp_one_port_cmd(hw, port, READ_TIME); + /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); if (err) return err; - /* Issue the sync to start the READ_TIME capture */ + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ ice_ptp_exec_tmr_cmd(hw); /* Read the captured PHC time from the shadow time registers */ @@ -2252,10 +2386,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, * @port: the PHY port to synchronize * * Perform an adjustment to ensure that the PHY and PHC timers are in sync. - * This is done by issuing a READ_TIME command which triggers a simultaneous - * read of the PHY timer and PHC timer. Then we use the difference to - * calculate an appropriate 2s complement addition to add to the PHY timer in - * order to ensure it reads the same value as the primary PHC timer. + * This is done by issuing a ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. */ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) { @@ -2285,7 +2420,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) goto err_unlock; - err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); if (err) goto err_unlock; @@ -2408,7 +2543,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) return err; - err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); if (err) return err; @@ -2436,7 +2571,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) return err; - err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); if (err) return err; @@ -2685,28 +2820,39 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) * @lport: the lport to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block of the - * external PHY on the E810 device. + * Read the timestamp and then forcibly overwrite its value to clear the valid + * bit from the timestamp block of the external PHY on the E810 device. + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). 
*/ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) { u32 lo_addr, hi_addr; + u64 unused_tstamp; int err; + err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); + return err; + } + lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); err = ice_write_phy_reg_e810(hw, lo_addr, 0); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); return err; } err = ice_write_phy_reg_e810(hw, hi_addr, 0); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); return err; } @@ -2757,7 +2903,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw) * * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the * initial clock time. The time will not actually be programmed until the - * driver issues an INIT_TIME command. + * driver issues an ICE_PTP_INIT_TIME command. * * The time value is the upper 32 bits of the PHY timer, usually in units of * nominal nanoseconds. @@ -2792,7 +2938,7 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) * * Prepare the PHY port for an atomic adjustment by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment - * is completed by issuing an ADJ_TIME sync command. + * is completed by issuing an ICE_PTP_ADJ_TIME sync command. * * The adjustment value only contains the portion used for the upper 32bits of * the PHY timer, usually in units of nominal nanoseconds. Negative @@ -2832,7 +2978,7 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) * * Prepare the PHY port for a new increment value by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is - * completed by issuing an INIT_INCVAL command. + * completed by issuing an ICE_PTP_INIT_INCVAL command. */ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) { @@ -2875,19 +3021,19 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) int err; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val = GLTSYN_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val = GLTSYN_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val = GLTSYN_CMD_ADJ_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val = GLTSYN_CMD_READ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; break; case ICE_PTP_NOP: @@ -3149,6 +3295,21 @@ void ice_ptp_unlock(struct ice_hw *hw) wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0); } +/** + * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type + * @hw: pointer to the HW structure + * + * Determine the PHY model for the device, and initialize hw->phy_model + * for use by other functions. 
+ */ +void ice_ptp_init_phy_model(struct ice_hw *hw) +{ + if (ice_is_e810(hw)) + hw->phy_model = ICE_PHY_E810; + else + hw->phy_model = ICE_PHY_E822; +} + /** * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command * @hw: pointer to HW struct @@ -3167,10 +3328,17 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_port_cmd_e810(hw, cmd); - else + break; + case ICE_PHY_E822: err = ice_ptp_port_cmd_e822(hw, cmd); + break; + default: + err = -EOPNOTSUPP; + } + if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", cmd, err); @@ -3212,14 +3380,21 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, INIT_TIME); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME); } /** @@ -3232,8 +3407,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) * * 1) Write the increment value to the source timer shadow registers * 2) Write the increment value to the PHY timer shadow registers - * 3) Issue an INIT_INCVAL timer command to synchronously switch both the - * source and port timers to the new increment value at the next clock + * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both + * the source and port timers to the new increment value at the next clock * cycle. */ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) @@ -3247,14 +3422,21 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_incval_e822(hw, incval); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, INIT_INCVAL); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL); } /** @@ -3288,8 +3470,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) * * 1) Write the adjustment to the source timer shadow registers * 2) Write the adjustment to the PHY timer shadow registers - * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to - * both the source and port timers at the next clock cycle. + * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the + * adjustment to both the source and port timers at the next clock cycle. */ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) { @@ -3299,21 +3481,28 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Write the desired clock adjustment into the GLTSYN_SHADJ register. - * For an ADJ_TIME command, this set of registers represents the value - * to add to the clock time. It supports subtraction by interpreting - * the value as a 2's complement integer. + * For an ICE_PTP_ADJ_TIME command, this set of registers represents + * the value to add to the clock time. It supports subtraction by + * interpreting the value as a 2's complement integer. 
*/ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_adj_e822(hw, adj); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, ADJ_TIME); + return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME); } /** @@ -3329,10 +3518,14 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - else + case ICE_PHY_E822: return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + default: + return -EOPNOTSUPP; + } } /** @@ -3341,16 +3534,71 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) * @block: the block to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block. For - * E822 devices, the block is the quad to clear from. For E810 devices, the - * block is the logical port to clear from. + * Clear a timestamp from the timestamp block, discarding its value without + * returning it. This resets the memory status bit for the timestamp index + * allowing it to be reused for another timestamp in the future. + * + * For E822 devices, the block number is the PHY quad to clear from. For E810 + * devices, the block number is the logical port to clear from. + * + * This function must only be called on a timestamp index whose valid bit is + * set according to ice_get_phy_tx_tstamp_ready(). */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - else + case ICE_PHY_E822: return ice_clear_phy_tstamp_e822(hw, block, idx); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_get_pf_c827_idx - find and return the C827 index for the current pf + * @hw: pointer to the hw struct + * @idx: index of the found C827 PHY + * Return: + * * 0 - success + * * negative - failure + */ +static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) +{ + struct ice_aqc_get_link_topo cmd; + u8 node_part_number; + u16 node_handle; + int status; + u8 ctx; + + if (hw->mac_type != ICE_MAC_E810) + return -ENODEV; + + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) { + *idx = C827_0; + return 0; + } + + memset(&cmd, 0, sizeof(cmd)); + + ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S; + ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; + cmd.addr.topo_params.node_type_ctx = ctx; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) + return -ENOENT; + + if (node_handle == E810C_QSFP_C827_0_HANDLE) + *idx = C827_0; + else if (node_handle == E810C_QSFP_C827_1_HANDLE) + *idx = C827_1; + else + return -EIO; + + return 0; } /** @@ -3359,10 +3607,14 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E822: + ice_ptp_reset_ts_memory_e822(hw); + break; + case ICE_PHY_E810: + default: return; - - ice_ptp_reset_ts_memory_e822(hw); + } } /** @@ -3381,10 +3633,14 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err 
indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); - else + case ICE_PHY_E822: return ice_ptp_init_phc_e822(hw); + default: + return -EOPNOTSUPP; + } } /** @@ -3400,10 +3656,362 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - else + case ICE_PHY_E822: return ice_get_phy_tx_tstamp_ready_e822(hw, block, tstamp_ready); + break; + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_cgu_get_pin_desc_e823 - get pin description array + * @hw: pointer to the hw struct + * @input: if request is done against input or output pin + * @size: number of inputs/outputs + * + * Return: pointer to pin description array associated to given hw. + */ +static const struct ice_cgu_pin_desc * +ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size) +{ + static const struct ice_cgu_pin_desc *t; + + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) { + if (input) { + t = ice_e823_zl_cgu_inputs; + *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs); + } else { + t = ice_e823_zl_cgu_outputs; + *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs); + } + } else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) { + if (input) { + t = ice_e823_si_cgu_inputs; + *size = ARRAY_SIZE(ice_e823_si_cgu_inputs); + } else { + t = ice_e823_si_cgu_outputs; + *size = ARRAY_SIZE(ice_e823_si_cgu_outputs); + } + } else { + t = NULL; + *size = 0; + } + + return t; +} + +/** + * ice_cgu_get_pin_desc - get pin description array + * @hw: pointer to the hw struct + * @input: if request is done against input or output pins + * @size: size of array returned by function + * + * Return: pointer to pin description array associated to given hw. + */ +static const struct ice_cgu_pin_desc * +ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size) +{ + const struct ice_cgu_pin_desc *t = NULL; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (input) { + t = ice_e810t_sfp_cgu_inputs; + *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs); + } else { + t = ice_e810t_sfp_cgu_outputs; + *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs); + } + break; + case ICE_DEV_ID_E810C_QSFP: + if (input) { + t = ice_e810t_qsfp_cgu_inputs; + *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs); + } else { + t = ice_e810t_qsfp_cgu_outputs; + *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs); + } + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + t = ice_cgu_get_pin_desc_e823(hw, input, size); + break; + default: + break; + } + + return t; +} + +/** + * ice_cgu_get_pin_type - get pin's type + * @hw: pointer to the hw struct + * @pin: pin index + * @input: if request is done against input or output pin + * + * Return: type of a pin. 
+ */ +enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input) +{ + const struct ice_cgu_pin_desc *t; + int t_size; + + t = ice_cgu_get_pin_desc(hw, input, &t_size); + + if (!t) + return 0; + + if (pin >= t_size) + return 0; + + return t[pin].type; +} + +/** + * ice_cgu_get_pin_freq_supp - get pin's supported frequency + * @hw: pointer to the hw struct + * @pin: pin index + * @input: if request is done against input or output pin + * @num: output number of supported frequencies + * + * Get the number of supported frequencies and the array of supported + * frequencies. + * + * Return: array of supported frequencies for given pin. + */ +struct dpll_pin_frequency * +ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num) +{ + const struct ice_cgu_pin_desc *t; + int t_size; + + *num = 0; + t = ice_cgu_get_pin_desc(hw, input, &t_size); + if (!t) + return NULL; + if (pin >= t_size) + return NULL; + *num = t[pin].freq_supp_num; + + return t[pin].freq_supp; +} + +/** + * ice_cgu_get_pin_name - get pin's name + * @hw: pointer to the hw struct + * @pin: pin index + * @input: if request is done against input or output pin + * + * Return: + * * null terminated char array with name + * * NULL in case of failure + */ +const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input) +{ + const struct ice_cgu_pin_desc *t; + int t_size; + + t = ice_cgu_get_pin_desc(hw, input, &t_size); + + if (!t) + return NULL; + + if (pin >= t_size) + return NULL; + + return t[pin].name; +} + +/** + * ice_get_cgu_state - get the state of the DPLL + * @hw: pointer to the hw struct + * @dpll_idx: Index of internal DPLL unit + * @last_dpll_state: last known state of DPLL + * @pin: pointer to a buffer for returning currently active pin + * @ref_state: reference clock state + * @eec_mode: eec mode of the DPLL + * @phase_offset: pointer to a buffer for returning phase offset + * @dpll_state: state of the DPLL (output) + * + * This function will read the state of the DPLL (dpll_idx). Non-null + * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to + * retrieve the currently active pin, state, mode and phase_offset respectively. + * + * Return: state of the DPLL + */ +int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, + enum dpll_lock_status last_dpll_state, u8 *pin, + u8 *ref_state, u8 *eec_mode, s64 *phase_offset, + enum dpll_lock_status *dpll_state) +{ + u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config; + s64 hw_phase_offset; + int status; + + status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state, + &hw_dpll_state, &hw_config, + &hw_phase_offset, &hw_eec_mode); + if (status) + return status; + + if (pin) + /* current ref pin in dpll_state_refsel_status_X register */ + *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL; + if (phase_offset) + *phase_offset = hw_phase_offset; + if (ref_state) + *ref_state = hw_ref_state; + if (eec_mode) + *eec_mode = hw_eec_mode; + if (!dpll_state) + return 0; + + /* According to the ZL DPLL documentation, once the state reaches + * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with ITU-T + * Recommendation G.781. We cannot report HOLDOVER as the HO memory is + * cleared while switching to another reference. + * Only when the previous state was either "LOCKED without HO_ACQ" or + * "HOLDOVER" do we actually go back to FREERUN.
+ */ + if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) { + if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY) + *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ; + else + *dpll_state = DPLL_LOCK_STATUS_LOCKED; + } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ || + last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) { + *dpll_state = DPLL_LOCK_STATUS_HOLDOVER; + } else { + *dpll_state = DPLL_LOCK_STATUS_UNLOCKED; + } + + return 0; +} + +/** + * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins + * @hw: pointer to the hw struct + * @base_idx: returns index of first recovered clock pin on device + * @pin_num: returns number of recovered clock pins available on device + * + * Based on hw provide caller info about recovery clock pins available on the + * board. + * + * Return: + * * 0 - success, information is valid + * * negative - failure, information is not valid + */ +int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) +{ + u8 phy_idx; + int ret; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810C_QSFP: + + ret = ice_get_pf_c827_idx(hw, &phy_idx); + if (ret) + return ret; + *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN); + *pin_num = ICE_E810_RCLK_PINS_NUM; + ret = 0; + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + *pin_num = ICE_E822_RCLK_PINS_NUM; + ret = 0; + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) + *base_idx = ZL_REF1P; + else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) + *base_idx = SI_REF1P; + else + ret = -ENODEV; + + break; + default: + ret = -ENODEV; + break; + } + + return ret; +} + +/** + * ice_cgu_get_output_pin_state_caps - get output pin state capabilities + * @hw: pointer to the hw struct + * @pin_id: id of a pin + * @caps: capabilities to modify + * + * Return: + * * 0 - success, state capabilities were modified + * * negative - failure, capabilities were not modified + */ +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps) +{ + bool can_change = true; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3) + can_change = false; + break; + case ICE_DEV_ID_E810C_QSFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4) + can_change = false; + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 && + pin_id == ZL_OUT2) + can_change = false; + else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 && + pin_id == SI_OUT1) + can_change = false; + break; + default: + return -EINVAL; + } + if (can_change) + *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + else + *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 9aa10b0426..cf76701566 100644 
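For clarity, the lock-state decision that ice_get_cgu_state() implements above can be reduced to the following minimal, self-contained C sketch. It is not part of the patch and the names are illustrative only; it simply mirrors the logic of the hunk: once the DPLL has reported LOCKED_HO_ACQ, a later loss of lock is reported as HOLDOVER rather than UNLOCKED, and only a previous plain-LOCKED or UNLOCKED state falls back to UNLOCKED.

#include <stdbool.h>
#include <stdio.h>

enum lock_status { UNLOCKED, LOCKED, LOCKED_HO_ACQ, HOLDOVER };

/* Mirror of the switch above: the hardware lock/ho-ready bits plus the last
 * reported state decide what is reported next.
 */
static enum lock_status dpll_report(bool hw_lock, bool hw_ho_ready,
				    enum lock_status last)
{
	if (hw_lock)
		return hw_ho_ready ? LOCKED_HO_ACQ : LOCKED;
	if (last == LOCKED_HO_ACQ || last == HOLDOVER)
		return HOLDOVER;
	return UNLOCKED;
}

int main(void)
{
	enum lock_status s = UNLOCKED;

	s = dpll_report(true, true, s);   /* -> LOCKED_HO_ACQ */
	s = dpll_report(false, false, s); /* -> HOLDOVER, not UNLOCKED */
	s = dpll_report(true, false, s);  /* -> LOCKED (no HO_ACQ) */
	s = dpll_report(false, false, s); /* -> UNLOCKED again */
	printf("final state: %d\n", s);
	return 0;
}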
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -3,13 +3,14 @@ #ifndef _ICE_PTP_HW_H_ #define _ICE_PTP_HW_H_ +#include enum ice_ptp_tmr_cmd { - INIT_TIME, - INIT_INCVAL, - ADJ_TIME, - ADJ_TIME_AT_TIME, - READ_TIME, + ICE_PTP_INIT_TIME, + ICE_PTP_INIT_INCVAL, + ICE_PTP_ADJ_TIME, + ICE_PTP_ADJ_TIME_AT_TIME, + ICE_PTP_READ_TIME, ICE_PTP_NOP, }; @@ -110,6 +111,77 @@ struct ice_cgu_pll_params_e822 { u32 post_pll_div; }; +#define E810C_QSFP_C827_0_HANDLE 2 +#define E810C_QSFP_C827_1_HANDLE 3 +enum ice_e810_c827_idx { + C827_0, + C827_1 +}; + +enum ice_phy_rclk_pins { + ICE_RCLKA_PIN = 0, /* SCL pin */ + ICE_RCLKB_PIN, /* SDA pin */ +}; + +#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1) +#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) +#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \ + (_pin) + ZL_REF1P) + +enum ice_zl_cgu_in_pins { + ZL_REF0P = 0, + ZL_REF0N, + ZL_REF1P, + ZL_REF1N, + ZL_REF2P, + ZL_REF2N, + ZL_REF3P, + ZL_REF3N, + ZL_REF4P, + ZL_REF4N, + NUM_ZL_CGU_INPUT_PINS +}; + +enum ice_zl_cgu_out_pins { + ZL_OUT0 = 0, + ZL_OUT1, + ZL_OUT2, + ZL_OUT3, + ZL_OUT4, + ZL_OUT5, + ZL_OUT6, + NUM_ZL_CGU_OUTPUT_PINS +}; + +enum ice_si_cgu_in_pins { + SI_REF0P = 0, + SI_REF0N, + SI_REF1P, + SI_REF1N, + SI_REF2P, + SI_REF2N, + SI_REF3, + SI_REF4, + NUM_SI_CGU_INPUT_PINS +}; + +enum ice_si_cgu_out_pins { + SI_OUT0 = 0, + SI_OUT1, + SI_OUT2, + SI_OUT3, + SI_OUT4, + NUM_SI_CGU_OUTPUT_PINS +}; + +struct ice_cgu_pin_desc { + char *name; + u8 index; + enum dpll_pin_type type; + u32 freq_supp_num; + struct dpll_pin_frequency *freq_supp; +}; + extern const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; @@ -131,6 +203,7 @@ extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); bool ice_ptp_lock(struct ice_hw *hw); void ice_ptp_unlock(struct ice_hw *hw); +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd); int ice_ptp_init_time(struct ice_hw *hw, u64 time); int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); @@ -197,6 +270,20 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw); int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); +bool ice_is_pca9575_present(struct ice_hw *hw); +enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); +struct dpll_pin_frequency * +ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num); +const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input); +int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, + enum dpll_lock_status last_dpll_state, u8 *pin, + u8 *ref_state, u8 *eec_mode, s64 *phase_offset, + enum dpll_lock_status *dpll_state); +int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); + +void ice_ptp_init_phy_model(struct ice_hw *hw); +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps); #define PFTSYN_SEM_BYTES 4 diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index c0533d7b66..2f4a621254 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * ice_sched_remove_elems - remove nodes 
from HW * @hw: pointer to the HW struct * @parent: pointer to the parent node - * @num_nodes: number of nodes - * @node_teids: array of node teids to be deleted + * @node_teid: node teid to be deleted * * This function remove nodes from HW */ static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, - u16 num_nodes, u32 *node_teids) + u32 node_teid) { - struct ice_aqc_delete_elem *buf; - u16 i, num_groups_removed = 0; - u16 buf_size; + DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1); + u16 buf_size = __struct_size(buf); + u16 num_groups_removed = 0; int status; - buf_size = struct_size(buf, teid, num_nodes); - buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->hdr.parent_teid = parent->info.node_teid; - buf->hdr.num_elems = cpu_to_le16(num_nodes); - for (i = 0; i < num_nodes; i++) - buf->teid[i] = cpu_to_le32(node_teids[i]); + buf->hdr.num_elems = cpu_to_le16(1); + buf->teid[0] = cpu_to_le32(node_teid); status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); @@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", hw->adminq.sq_last_status); - devm_kfree(ice_hw_to_dev(hw), buf); return status; } @@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - ice_sched_remove_elems(hw, node->parent, 1, &teid); + ice_sched_remove_elems(hw, node->parent, teid); } parent = node->parent; /* root has no parent */ @@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, } /** - * ice_aq_move_sched_elems - move scheduler elements + * ice_aq_move_sched_elems - move scheduler element (just 1 group) * @hw: pointer to the HW struct - * @grps_req: number of groups to move * @buf: pointer to buffer * @buf_size: buffer size in bytes * @grps_movd: returns total number of groups moved - * @cd: pointer to command details structure or NULL * * Move scheduling elements (0x0408) */ int -ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, - struct ice_aqc_move_elem *buf, u16 buf_size, - u16 *grps_movd, struct ice_sq_cd *cd) +ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, + u16 buf_size, u16 *grps_movd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems, - grps_req, (void *)buf, buf_size, - grps_movd, cd); + 1, buf, buf_size, grps_movd, NULL); } /** @@ -1193,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) int status; /* remove the default leaf node */ - status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); + status = ice_sched_remove_elems(pi->hw, node->parent, teid); if (!status) ice_free_sched_node(pi, node); } @@ -2232,12 +2220,12 @@ int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { - struct ice_aqc_move_elem *buf; + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); + u16 buf_len = __struct_size(buf); struct ice_sched_node *node; u16 i, grps_movd = 0; struct ice_hw *hw; int status = 0; - u16 buf_len; hw = pi->hw; @@ -2249,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, hw->max_children[parent->tx_sched_layer]) return -ENOSPC; - buf_len = struct_size(buf, teid, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - for (i = 0; i < 
num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { status = -EINVAL; - goto move_err_exit; + break; } buf->hdr.src_parent_teid = node->info.parent_teid; buf->hdr.dest_parent_teid = parent->info.node_teid; buf->teid[0] = node->info.node_teid; buf->hdr.num_elems = cpu_to_le16(1); - status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, - &grps_movd, NULL); + status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd); if (status && grps_movd != 1) { status = -EIO; - goto move_err_exit; + break; } /* update the SW DB */ ice_sched_update_parent(parent, node); } -move_err_exit: - kfree(buf); return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 0055d9330c..1aef05ea5a 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -161,10 +161,8 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, u16 *num_nodes_added); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); -int -ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, - struct ice_aqc_move_elem *buf, u16 buf_size, - u16 *grps_movd, struct ice_sq_cd *cd); +int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, + u16 buf_size, u16 *grps_movd); int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 31314e7540..e1494f24f6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -64,7 +64,7 @@ static void ice_free_vf_res(struct ice_vf *vf) vf->num_mac = 0; } - last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; + last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; /* clear VF MDD event information */ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); @@ -102,7 +102,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); first = vf->first_vector_idx; - last = first + pf->vfs.num_msix_per - 1; + last = first + vf->num_msix - 1; for (v = first; v <= last; v++) { u32 reg; @@ -138,6 +138,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) if (!pf) return -EINVAL; + bitmap_free(pf->sriov_irq_bm); + pf->sriov_irq_size = 0; pf->sriov_base_vector = 0; return 0; @@ -244,22 +246,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) return vsi; } -/** - * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space - * @pf: pointer to PF structure - * @vf: pointer to VF that the first MSIX vector index is being calculated for - * - * This returns the first MSIX vector index in PF space that is used by this VF. - * This index is used when accessing PF relative registers such as - * GLINT_VECT2FUNC and GLINT_DYN_CTL. - * This will always be the OICR index in the AVF driver so any functionality - * using vf->first_vector_idx for queue configuration will have to increment by - * 1 to avoid meddling with the OICR index. 
- */ -static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) -{ - return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; -} /** * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware @@ -280,12 +266,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) hw = &pf->hw; pf_based_first_msix = vf->first_vector_idx; - pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; + pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1; device_based_first_msix = pf_based_first_msix + pf->hw.func_caps.common_cap.msix_vector_first_id; device_based_last_msix = - (device_based_first_msix + pf->vfs.num_msix_per) - 1; + (device_based_first_msix + vf->num_msix) - 1; device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & @@ -388,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) */ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { - struct ice_pf *pf; - if (!vf || !q_vector) return -EINVAL; - pf = vf->pf; - /* always add one to account for the OICR being the first MSIX */ - return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + - q_vector->v_idx + 1; + return vf->first_vector_idx + q_vector->v_idx + 1; } /** @@ -526,6 +507,52 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return 0; } +/** + * ice_sriov_get_irqs - get irqs for the SR-IOV use case + * @pf: pointer to PF structure + * @needed: number of irqs to get + * + * Returns the first MSI-X vector index in PF space that is reserved for this + * request. This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * + * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are + * allocated from the end of the global irq index space: the first bit in + * sriov_irq_bm corresponds to the last irq index, and so on. This simplifies + * extending the SRIOV vector range. The vectors always occupy the range from + * sriov_base_vector to the last irq index; sriov_base_vector can be moved + * when that range grows or shrinks.
+ */ +static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) +{ + int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, + pf->sriov_irq_size, 0, needed, 0); + /* conversion from number in bitmap to global irq index */ + int index = pf->sriov_irq_size - res - needed; + + if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) + return -ENOENT; + + bitmap_set(pf->sriov_irq_bm, res, needed); + return index; +} + +/** + * ice_sriov_free_irqs - free irqs used by the VF + * @pf: pointer to PF structure + * @vf: pointer to VF structure + */ +static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) +{ + /* Move back from first vector index to first index in bitmap */ + int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; + + bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); + vf->first_vector_idx = 0; +} + /** * ice_init_vf_vsi_res - initialize/setup VF VSI resources * @vf: VF to initialize/setup the VSI for @@ -539,7 +566,9 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) struct ice_vsi *vsi; int err; - vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + return -ENOMEM; vsi = ice_vf_vsi_setup(vf); if (!vsi) @@ -789,14 +818,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = { */ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) { + struct pci_dev *pdev = pf->pdev; struct ice_vfs *vfs = &pf->vfs; + struct pci_dev *vfdev = NULL; struct ice_vf *vf; - u16 vf_id; - int err; + u16 vf_pdev_id; + int err, pos; lockdep_assert_held(&vfs->table_lock); - for (vf_id = 0; vf_id < num_vfs; vf_id++) { + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id); + + for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) { vf = kzalloc(sizeof(*vf), GFP_KERNEL); if (!vf) { err = -ENOMEM; @@ -812,11 +846,28 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) ice_initialize_vf_entry(vf); + do { + vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev); + } while (vfdev && vfdev->physfn != pdev); + vf->vfdev = vfdev; vf->vf_sw_id = pf->first_sw; + pci_dev_get(vfdev); + + /* set default number of MSI-X */ + vf->num_msix = pf->vfs.num_msix_per; + vf->num_vf_qs = pf->vfs.num_qps_per; + ice_vc_set_default_allowlist(vf); + hash_add_rcu(vfs->table, &vf->entry, vf_id); } + /* Decrement of refcount done by pci_get_device() inside the loop does + * not touch the last iteration's vfdev, so it has to be done manually + * to balance pci_dev_get() added within the loop. + */ + pci_dev_put(vfdev); + return 0; err_free_entries: @@ -831,10 +882,16 @@ err_free_entries: */ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) { + int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int ret; + pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); + if (!pf->sriov_irq_bm) + return -ENOMEM; + pf->sriov_irq_size = total_vectors; + /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); @@ -893,6 +950,7 @@ err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); clear_bit(ICE_OICR_INTR_DIS, pf->state); + bitmap_free(pf->sriov_irq_bm); return ret; } @@ -956,6 +1014,175 @@ static int ice_check_sriov_allowed(struct ice_pf *pf) return 0; } +/** + * ice_sriov_get_vf_total_msix - return the total number of MSI-X vectors available for VFs + * @pdev: pointer to pci_dev struct + * + * The function is called via sysfs ops + */ +u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); +} + +static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) +{ + if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) + return -ENOMEM; + + pf->sriov_base_vector -= move; + return 0; +} + +static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) +{ + u16 vf_ids[ICE_MAX_SRIOV_VFS]; + struct ice_vf *tmp_vf; + int to_remap = 0, bkt; + + /* For better irqs usage try to remap irqs of VFs + * that aren't running yet + */ + ice_for_each_vf(pf, bkt, tmp_vf) { + /* skip VF which is changing the number of MSI-X */ + if (restricted_id == tmp_vf->vf_id || + test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states)) + continue; + + ice_dis_vf_mappings(tmp_vf); + ice_sriov_free_irqs(pf, tmp_vf); + + vf_ids[to_remap] = tmp_vf->vf_id; + to_remap += 1; + } + + for (int i = 0; i < to_remap; i++) { + tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]); + if (!tmp_vf) + continue; + + tmp_vf->first_vector_idx = + ice_sriov_get_irqs(pf, tmp_vf->num_msix); + /* there is no need to rebuild VSI as we are only changing the + * vector indexes not amount of MSI-X or queues + */ + ice_ena_vf_mappings(tmp_vf); + ice_put_vf(tmp_vf); + } +} + +/** + * ice_sriov_set_msix_vec_count + * @vf_dev: pointer to pci_dev struct of VF device + * @msix_vec_count: new number of MSI-X vectors for this VF + * + * Set requested MSI-X, queues and registers for @vf_dev. + * + * First do some sanity checks, e.g. whether there are any VFs and whether the + * new value is valid. Then disable the old mapping (MSI-X and queue registers), + * change the MSI-X and queue counts, rebuild the VSI and enable the new mapping. + * + * If possible (no driver bound to the VF), also try to remap the other VFs to + * linearize the irq register usage. + */ +int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) +{ + struct pci_dev *pdev = pci_physfn(vf_dev); + struct ice_pf *pf = pci_get_drvdata(pdev); + u16 prev_msix, prev_queues, queues; + bool needs_rebuild = false; + struct ice_vf *vf; + int id; + + if (!ice_get_num_vfs(pf)) + return -ENOENT; + + if (!msix_vec_count) + return 0; + + queues = msix_vec_count; + /* add 1 MSI-X for OICR */ + msix_vec_count += 1; + + if (queues > min(ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf))) + return -EINVAL; + + if (msix_vec_count < ICE_MIN_INTR_PER_VF) + return -EINVAL; + + /* Translate the PCI VF function number to the VF id */ + for (id = 0; id < pci_num_vf(pdev); id++) { + if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id)) + break; + } + + if (id == pci_num_vf(pdev)) + return -ENOENT; + + vf = ice_get_vf_by_id(pf, id); + + if (!vf) + return -ENOENT; + + prev_msix = vf->num_msix; + prev_queues = vf->num_vf_qs; + + if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { + ice_put_vf(vf); + return -ENOSPC; + } + + ice_dis_vf_mappings(vf); + ice_sriov_free_irqs(pf, vf); + + /* Remap all VFs besides the one being configured now */ + ice_sriov_remap_vectors(pf, vf->vf_id); + + vf->num_msix = msix_vec_count; + vf->num_vf_qs = queues; + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + goto unroll; + + ice_vf_vsi_release(vf); + if (vf->vf_ops->create_vsi(vf)) { + /* Try to rebuild with previous values */ + needs_rebuild = true; + goto unroll; + } + + dev_info(ice_pf_to_dev(pf), + "Changing VF %d resources to %d vectors and %d queues\n", + vf->vf_id, vf->num_msix, vf->num_vf_qs); + + ice_ena_vf_mappings(vf); + ice_put_vf(vf); + + return 0; + +unroll: + dev_info(ice_pf_to_dev(pf), + "Can't set %d vectors on VF %d, falling back to %d\n", + vf->num_msix, vf->vf_id, prev_msix); + + vf->num_msix = prev_msix; + vf->num_vf_qs = prev_queues; + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + return -EINVAL; + + if (needs_rebuild) + vf->vf_ops->create_vsi(vf); + + ice_ena_vf_mappings(vf); + ice_put_vf(vf); + + return -EINVAL; +} + /** * ice_sriov_configure - Enable or change number of VFs via sysfs * @pdev: pointer to a pci_dev structure @@ -1709,31 +1936,16 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) /** * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR - * @pdev: pointer to a pci_dev structure + * @pf: pointer to the PF structure * * Called when recovering from a PF FLR to restore interrupt capability to * the VFs. 
*/ -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { - u16 vf_id; - int pos; - - if (!pci_num_vf(pdev)) - return; + struct ice_vf *vf; + u32 bkt; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - struct pci_dev *vfdev; - - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, - &vf_id); - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && vfdev->physfn == pdev) - pci_restore_msi_state(vfdev); - vfdev = pci_get_device(pdev->vendor, vf_id, - vfdev); - } - } + ice_for_each_vf(pf, bkt, vf) + pci_restore_msi_state(vf->vfdev); } diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 346cb2666f..8488df38b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -33,7 +33,7 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); void ice_free_vfs(struct ice_pf *pf); -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); +void ice_restore_all_vfs_msi_state(struct ice_pf *pf); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -60,6 +60,8 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); bool ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); +u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); +int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count); #else /* CONFIG_PCI_IOV */ static inline void ice_process_vflr_event(struct ice_pf *pf) { } static inline void ice_free_vfs(struct ice_pf *pf) { } @@ -67,7 +69,7 @@ static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } -static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { } static inline int ice_sriov_configure(struct pci_dev __always_unused *pdev, @@ -142,5 +144,16 @@ ice_get_vf_stats(struct net_device __always_unused *netdev, { return -EOPNOTSUPP; } + +static inline u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) +{ + return 0; +} + +static inline int +ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 2f77b684ff..ee19f3aa3d 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1812,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { - struct ice_aqc_alloc_free_res_elem *sw_buf; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); struct ice_aqc_res_elem *vsi_ele; - u16 buf_len; int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -1840,8 +1836,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = 
-EINVAL; - goto ice_aq_alloc_free_vsi_list_exit; + return -EINVAL; } if (opc == ice_aqc_opc_free_res) @@ -1849,16 +1844,14 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc); if (status) - goto ice_aq_alloc_free_vsi_list_exit; + return status; if (opc == ice_aqc_opc_alloc_res) { vsi_ele = &sw_buf->elem[0]; *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp); } -ice_aq_alloc_free_vsi_list_exit: - devm_kfree(ice_hw_to_dev(hw), sw_buf); - return status; + return 0; } /** @@ -2088,15 +2081,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, */ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { - struct ice_aqc_alloc_free_res_elem *sw_buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = kzalloc(buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; - sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << ICE_AQC_RES_TYPE_S) | @@ -2105,7 +2093,6 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) ice_aqc_opc_alloc_res); if (!status) *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); - kfree(sw_buf); return status; } @@ -4434,28 +4421,19 @@ int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Allocate resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) - goto exit; + return status; *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); - -exit: - kfree(buf); return status; } @@ -4471,16 +4449,10 @@ int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Free resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); @@ -4490,7 +4462,6 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, if (status) ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); - kfree(buf); return status; } @@ -4508,15 +4479,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, */ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(1); if (shared) buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -4534,7 +4500,6 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) ice_debug(hw, ICE_DBG_SW, "Could not set 
resource type %u id %u to %s\n", type, res_id, shared ? "SHARED" : "DEDICATED"); - kfree(buf); return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 24c9140159..9170a3e8f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2305,9 +2305,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) return; - if (!tx_ring->ptp_tx) - return; - /* Tx timestamps cannot be sampled when doing TSO */ if (first->tx_flags & ICE_TX_FLAGS_TSO) return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 407d4c3200..b28b9826bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -381,7 +381,6 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ - u8 ptp_tx; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index c8322fb6f2..7e06373e14 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx]; if (xdp_res & ICE_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & ICE_XDP_TX) { if (static_branch_unlikely(&ice_xdp_locking_key)) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 5e353b0cbe..a18ca0ff87 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
*/ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ @@ -129,6 +129,7 @@ enum ice_set_fc_aq_failures { enum ice_mac_type { ICE_MAC_UNKNOWN = 0, ICE_MAC_E810, + ICE_MAC_E830, ICE_MAC_GENERIC, }; @@ -822,6 +823,13 @@ struct ice_mbx_data { u16 async_watermark_val; }; +/* PHY model */ +enum ice_phy_model { + ICE_PHY_UNSUP = -1, + ICE_PHY_E810 = 1, + ICE_PHY_E822, +}; + /* Port hardware description */ struct ice_hw { u8 __iomem *hw_addr; @@ -843,6 +851,7 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ + enum ice_phy_model phy_model; u16 max_burst_size; /* driver sets this value */ @@ -901,17 +910,20 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_PHY_PER_NAC 1 -#define ICE_MAX_QUAD 2 -#define ICE_NUM_QUAD_TYPE 2 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PHY_0_LAST_QUAD 1 -#define ICE_PORTS_PER_PHY 8 -#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY +#define ICE_PHY_PER_NAC_E822 1 +#define ICE_MAX_QUAD 2 +#define ICE_QUADS_PER_PHY_E822 2 +#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_PORTS_PER_QUAD 4 +#define ICE_PORTS_PER_PHY_E810 4 +#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; + u32 pkg_seg_id; + u32 pkg_sign_type; u32 active_track_id; + u8 pkg_has_signing_seg:1; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; @@ -965,6 +977,7 @@ struct ice_hw { DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u8 dvm_ena; u16 io_expander_handle; + u8 cgu_part_number; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index d488c7156d..b7ae099521 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -56,6 +56,8 @@ static void ice_release_vf(struct kref *ref) { struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); + pci_dev_put(vf->vfdev); + vf->vf_ops->free(vf); } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 48fea6fa03..93c774f2f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -72,7 +72,7 @@ struct ice_vfs { struct mutex table_lock; /* Lock for protecting the hash table */ u16 num_supported; /* max supported VFs on this PF */ u16 num_qps_per; /* number of queue pairs per VF */ - u16 num_msix_per; /* number of MSI-X vectors per VF */ + u16 num_msix_per; /* default MSI-X vectors per VF */ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */ }; @@ -82,7 +82,7 @@ struct ice_vf { struct rcu_head rcu; struct kref refcnt; struct ice_pf *pf; - + struct pci_dev *vfdev; /* Used during virtchnl message handling and NDO ops against the VF * that will trigger a VFR */ @@ -123,6 +123,9 @@ struct ice_vf { u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; u16 num_vf_qs; /* num of queue configured per VF */ + u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ +#define ICE_INNER_VLAN_STRIP_ENA BIT(0) +#define ICE_OUTER_VLAN_STRIP_ENA BIT(1) struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); @@ -133,6 +136,8 @@ struct ice_vf { /* devlink port data */ struct devlink_port devlink_port; + + u16 num_msix; /* num of MSI-X configured on this VF */ }; /* Flags for controlling behavior of ice_reset_vf */ diff --git 
a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 62337e6569..8872f7a4f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -486,6 +486,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC; + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; @@ -498,7 +501,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; - vfres->max_vectors = vf->pf->vfs.num_msix_per; + vfres->max_vectors = vf->num_msix; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); @@ -820,8 +823,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) int status; lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : - ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -829,11 +832,9 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } - ctx->info.q_opt_rss = ((lut_type << - ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - (hash_type & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + ctx->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); /* Preserve existing queueing option setting */ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & @@ -1520,7 +1521,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) u16 num_q_vectors_mapped, vsi_id, vector_id; struct virtchnl_irq_map_info *irqmap_info; struct virtchnl_vector_map *map; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -1532,7 +1532,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->vfs.num_msix_per < num_q_vectors_mapped || + vf->num_msix < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -1554,7 +1554,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->vfs.num_msix_per) || + if (!(vector_id < vf->num_msix) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1635,6 +1635,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + for (i = 0; i < qci->num_queue_pairs; i++) { + if (!qci->qpair[i].rxq.crc_disable) + continue; + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) || + vf->vlan_strip_ena) + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; if (qpi->txq.vsi_id != qci->vsi_id || @@ -1681,6 +1690,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; + if (qpi->rxq.crc_disable) + 
vsi->rx_rings[q_idx]->flags |= + ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[q_idx]->flags &= + ~ICE_RX_FLAGS_CRC_STRIP_DIS; + if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) @@ -2435,6 +2451,21 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) return ice_vc_process_vlan_msg(vf, msg, false); } +/** + * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not + * @vsi: pointer to the VF VSI info + */ +static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi) +{ + unsigned int i; + + ice_for_each_alloc_rxq(vsi, i) + if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS) + return true; + + return false; +} + /** * ice_vc_ena_vlan_stripping * @vf: pointer to the VF info @@ -2464,6 +2495,8 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; + else + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, @@ -2499,6 +2532,8 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) if (vsi->inner_vlan_ops.dis_stripping(vsi)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; + else + vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, @@ -2676,6 +2711,8 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + vf->vlan_strip_ena = 0; + if (!vsi) return -EINVAL; @@ -2685,10 +2722,16 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw)) return 0; - if (ice_vf_vlan_offload_ena(vf->driver_caps)) - return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); - else - return vsi->inner_vlan_ops.dis_stripping(vsi); + if (ice_vf_vlan_offload_ena(vf->driver_caps)) { + int err; + + err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); + if (!err) + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; + return err; + } + + return vsi->inner_vlan_ops.dis_stripping(vsi); } static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) @@ -3462,6 +3505,11 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ice_vsi_is_rxq_crc_strip_dis(vsi)) { + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto out; + } + ethertype_setting = strip_msg->outer_ethertype_setting; if (ethertype_setting) { if (ice_vc_ena_vlan_offload(vsi, @@ -3482,6 +3530,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) * enabled, is extracted in L2TAG1. */ ice_vsi_update_l2tsel(vsi, l2tsel); + + vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA; } } @@ -3493,6 +3543,9 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ethertype_setting) + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; + out: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); @@ -3554,6 +3607,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) * in L2TAG1. 
*/ ice_vsi_update_l2tsel(vsi, l2tsel); + + vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA; } } @@ -3563,6 +3618,9 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ethertype_setting) + vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; + out: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index daa6a1e894..24b23b7ef0 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021, Intel Corporation. */ +/* Copyright (C) 2021-2023, Intel Corporation. */ #include "ice.h" #include "ice_base.h" @@ -1422,8 +1422,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, */ static void ice_vf_fdir_dump_info(struct ice_vf *vf) { + u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b; struct ice_vsi *vf_vsi; - u32 fd_size, fd_cnt; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1442,12 +1442,25 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); - dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n", - vf->vf_id, - (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, - (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, - (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, - (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S); + switch (hw->mac_type) { + case ICE_MAC_E830: + fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size); + fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size); + fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt); + fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt); + break; + case ICE_MAC_E810: + default: + fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size); + fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size); + fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt); + fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt); + } + + dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n", + vf->vf_id, fd_size_g, fd_size_b); + dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n", + vf->vf_id, fd_cnt_g, fd_cnt_b); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 76266e709a..8307902115 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -131,6 +131,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; + u8 *ivf; int err; /* do not allow modifying VLAN stripping when a port VLAN is configured @@ -143,19 +144,24 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) if (!ctxt) return -ENOMEM; + ivf = &ctxt->info.inner_vlan_flags; + /* Here we are configuring what the VSI should do with the VLAN tag in * the Rx packet. We can either leave the tag in the packet or put it in * the Rx descriptor. 
*/ - if (ena) + if (ena) { /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH; - else + *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH); + } else { /* Disable stripping. Leave tag in packet */ - ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; + *ivf = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); + } /* Allow all packets untagged/tagged */ - ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; + *ivf |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 307c609137..f3663b3f63 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) */ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) { - struct ice_aqc_add_tx_qgrp *qg_buf; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); + u16 size = __struct_size(qg_buf); struct ice_q_vector *q_vector; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; - u16 size; int err; if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) return -EINVAL; - size = struct_size(qg_buf, txqs, 1); - qg_buf = kzalloc(size, GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - qg_buf->num_txqs = 1; tx_ring = vsi->tx_rings[q_idx]; @@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf); if (err) - goto free_buf; + return err; if (ice_is_xdp_ena_vsi(vsi)) { struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; @@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) qg_buf->num_txqs = 1; err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); if (err) - goto free_buf; + return err; ice_set_ring_xdp(xdp_ring); ice_tx_xsk_pool(vsi, q_idx); } err = ice_vsi_cfg_rxq(rx_ring); if (err) - goto free_buf; + return err; ice_qvec_cfg_msix(vsi, q_vector); err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); if (err) - goto free_buf; + return err; clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); -free_buf: - kfree(qg_buf); - return err; + + return 0; } /** diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile new file mode 100644 index 0000000000..6844ead2f3 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2023 Intel Corporation + +# Makefile for Intel(R) Infrastructure Data Path Function Linux Driver + +obj-$(CONFIG_IDPF) += idpf.o + +idpf-y := \ + idpf_controlq.o \ + idpf_controlq_setup.o \ + idpf_dev.o \ + idpf_ethtool.o \ + idpf_lib.o \ + idpf_main.o \ + idpf_singleq_txrx.o \ + idpf_txrx.o \ + idpf_virtchnl.o \ + idpf_vf_dev.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h new file mode 100644 index 0000000000..bee73353b5 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -0,0 +1,968 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_H_ +#define _IDPF_H_ + +/* Forward declaration */ +struct idpf_adapter; +struct idpf_vport; +struct idpf_vport_max_q; + +#include +#include +#include 
+#include +#include +#include +#include +#include +#include + +#include "virtchnl2.h" +#include "idpf_lan_txrx.h" +#include "idpf_txrx.h" +#include "idpf_controlq.h" + +#define GETMAXVAL(num_bits) GENMASK((num_bits) - 1, 0) + +#define IDPF_NO_FREE_SLOT 0xffff + +/* Default Mailbox settings */ +#define IDPF_NUM_FILTERS_PER_MSG 20 +#define IDPF_NUM_DFLT_MBX_Q 2 /* includes both TX and RX */ +#define IDPF_DFLT_MBX_Q_LEN 64 +#define IDPF_DFLT_MBX_ID -1 +/* maximum number of times to try before resetting mailbox */ +#define IDPF_MB_MAX_ERR 20 +#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \ + ((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz)) +#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000 +#define IDPF_WAIT_FOR_EVENT_TIMEO 60000 + +#define IDPF_MAX_WAIT 500 + +/* available message levels */ +#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IDPF_DIM_PROFILE_SLOTS 5 + +#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2 +#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0 + +/** + * struct idpf_mac_filter + * @list: list member field + * @macaddr: MAC address + * @remove: filter should be removed (virtchnl) + * @add: filter should be added (virtchnl) + */ +struct idpf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; + bool add; +}; + +/** + * enum idpf_state - State machine to handle bring up + * @__IDPF_STARTUP: Start the state machine + * @__IDPF_VER_CHECK: Negotiate virtchnl version + * @__IDPF_GET_CAPS: Negotiate capabilities + * @__IDPF_INIT_SW: Init based on given capabilities + * @__IDPF_STATE_LAST: Must be last, used to determine size + */ +enum idpf_state { + __IDPF_STARTUP, + __IDPF_VER_CHECK, + __IDPF_GET_CAPS, + __IDPF_INIT_SW, + __IDPF_STATE_LAST, +}; + +/** + * enum idpf_flags - Hard reset causes. + * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout + * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW + * @IDPF_HR_RESET_IN_PROG: Reset in progress + * @IDPF_REMOVE_IN_PROG: Driver remove in progress + * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode + * @IDPF_FLAGS_NBITS: Must be last + */ +enum idpf_flags { + IDPF_HR_FUNC_RESET, + IDPF_HR_DRV_LOAD, + IDPF_HR_RESET_IN_PROG, + IDPF_REMOVE_IN_PROG, + IDPF_MB_INTR_MODE, + IDPF_FLAGS_NBITS, +}; + +/** + * enum idpf_cap_field - Offsets into capabilities struct for specific caps + * @IDPF_BASE_CAPS: generic base capabilities + * @IDPF_CSUM_CAPS: checksum offload capabilities + * @IDPF_SEG_CAPS: segmentation offload capabilities + * @IDPF_RSS_CAPS: RSS offload capabilities + * @IDPF_HSPLIT_CAPS: Header split capabilities + * @IDPF_RSC_CAPS: RSC offload capabilities + * @IDPF_OTHER_CAPS: miscellaneous offloads + * + * Used when checking for a specific capability flag since different capability + * sets are not mutually exclusive numerically, the caller must specify which + * type of capability they are checking for. 
+ */ +enum idpf_cap_field { + IDPF_BASE_CAPS = -1, + IDPF_CSUM_CAPS = offsetof(struct virtchnl2_get_capabilities, + csum_caps), + IDPF_SEG_CAPS = offsetof(struct virtchnl2_get_capabilities, + seg_caps), + IDPF_RSS_CAPS = offsetof(struct virtchnl2_get_capabilities, + rss_caps), + IDPF_HSPLIT_CAPS = offsetof(struct virtchnl2_get_capabilities, + hsplit_caps), + IDPF_RSC_CAPS = offsetof(struct virtchnl2_get_capabilities, + rsc_caps), + IDPF_OTHER_CAPS = offsetof(struct virtchnl2_get_capabilities, + other_caps), +}; + +/** + * enum idpf_vport_state - Current vport state + * @__IDPF_VPORT_DOWN: Vport is down + * @__IDPF_VPORT_UP: Vport is up + * @__IDPF_VPORT_STATE_LAST: Must be last, number of states + */ +enum idpf_vport_state { + __IDPF_VPORT_DOWN, + __IDPF_VPORT_UP, + __IDPF_VPORT_STATE_LAST, +}; + +/** + * struct idpf_netdev_priv - Struct to store vport back pointer + * @adapter: Adapter back pointer + * @vport: Vport back pointer + * @vport_id: Vport identifier + * @vport_idx: Relative vport index + * @state: See enum idpf_vport_state + * @netstats: Packet and byte stats + * @stats_lock: Lock to protect stats update + */ +struct idpf_netdev_priv { + struct idpf_adapter *adapter; + struct idpf_vport *vport; + u32 vport_id; + u16 vport_idx; + enum idpf_vport_state state; + struct rtnl_link_stats64 netstats; + spinlock_t stats_lock; +}; + +/** + * struct idpf_reset_reg - Reset register offsets/masks + * @rstat: Reset status register + * @rstat_m: Reset status mask + */ +struct idpf_reset_reg { + void __iomem *rstat; + u32 rstat_m; +}; + +/** + * struct idpf_vport_max_q - Queue limits + * @max_rxq: Maximum number of RX queues supported + * @max_txq: Maixmum number of TX queues supported + * @max_bufq: In splitq, maximum number of buffer queues supported + * @max_complq: In splitq, maximum number of completion queues supported + */ +struct idpf_vport_max_q { + u16 max_rxq; + u16 max_txq; + u16 max_bufq; + u16 max_complq; +}; + +/** + * struct idpf_reg_ops - Device specific register operation function pointers + * @ctlq_reg_init: Mailbox control queue register initialization + * @intr_reg_init: Traffic interrupt register initialization + * @mb_intr_reg_init: Mailbox interrupt register initialization + * @reset_reg_init: Reset register initialization + * @trigger_reset: Trigger a reset to occur + */ +struct idpf_reg_ops { + void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); + int (*intr_reg_init)(struct idpf_vport *vport); + void (*mb_intr_reg_init)(struct idpf_adapter *adapter); + void (*reset_reg_init)(struct idpf_adapter *adapter); + void (*trigger_reset)(struct idpf_adapter *adapter, + enum idpf_flags trig_cause); +}; + +/** + * struct idpf_dev_ops - Device specific operations + * @reg_ops: Register operations + */ +struct idpf_dev_ops { + struct idpf_reg_ops reg_ops; +}; + +/* These macros allow us to generate an enum and a matching char * array of + * stringified enums that are always in sync. Checkpatch issues a bogus warning + * about this being a complex macro; but it's wrong, these are never used as a + * statement and instead only used to define the enum and array. 
+ */ +#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \ + STATE(IDPF_VC_CREATE_VPORT) \ + STATE(IDPF_VC_CREATE_VPORT_ERR) \ + STATE(IDPF_VC_ENA_VPORT) \ + STATE(IDPF_VC_ENA_VPORT_ERR) \ + STATE(IDPF_VC_DIS_VPORT) \ + STATE(IDPF_VC_DIS_VPORT_ERR) \ + STATE(IDPF_VC_DESTROY_VPORT) \ + STATE(IDPF_VC_DESTROY_VPORT_ERR) \ + STATE(IDPF_VC_CONFIG_TXQ) \ + STATE(IDPF_VC_CONFIG_TXQ_ERR) \ + STATE(IDPF_VC_CONFIG_RXQ) \ + STATE(IDPF_VC_CONFIG_RXQ_ERR) \ + STATE(IDPF_VC_ENA_QUEUES) \ + STATE(IDPF_VC_ENA_QUEUES_ERR) \ + STATE(IDPF_VC_DIS_QUEUES) \ + STATE(IDPF_VC_DIS_QUEUES_ERR) \ + STATE(IDPF_VC_MAP_IRQ) \ + STATE(IDPF_VC_MAP_IRQ_ERR) \ + STATE(IDPF_VC_UNMAP_IRQ) \ + STATE(IDPF_VC_UNMAP_IRQ_ERR) \ + STATE(IDPF_VC_ADD_QUEUES) \ + STATE(IDPF_VC_ADD_QUEUES_ERR) \ + STATE(IDPF_VC_DEL_QUEUES) \ + STATE(IDPF_VC_DEL_QUEUES_ERR) \ + STATE(IDPF_VC_ALLOC_VECTORS) \ + STATE(IDPF_VC_ALLOC_VECTORS_ERR) \ + STATE(IDPF_VC_DEALLOC_VECTORS) \ + STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \ + STATE(IDPF_VC_SET_SRIOV_VFS) \ + STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \ + STATE(IDPF_VC_GET_RSS_LUT) \ + STATE(IDPF_VC_GET_RSS_LUT_ERR) \ + STATE(IDPF_VC_SET_RSS_LUT) \ + STATE(IDPF_VC_SET_RSS_LUT_ERR) \ + STATE(IDPF_VC_GET_RSS_KEY) \ + STATE(IDPF_VC_GET_RSS_KEY_ERR) \ + STATE(IDPF_VC_SET_RSS_KEY) \ + STATE(IDPF_VC_SET_RSS_KEY_ERR) \ + STATE(IDPF_VC_GET_STATS) \ + STATE(IDPF_VC_GET_STATS_ERR) \ + STATE(IDPF_VC_ADD_MAC_ADDR) \ + STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \ + STATE(IDPF_VC_DEL_MAC_ADDR) \ + STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \ + STATE(IDPF_VC_GET_PTYPE_INFO) \ + STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \ + STATE(IDPF_VC_LOOPBACK_STATE) \ + STATE(IDPF_VC_LOOPBACK_STATE_ERR) \ + STATE(IDPF_VC_NBITS) + +#define IDPF_GEN_ENUM(ENUM) ENUM, +#define IDPF_GEN_STRING(STRING) #STRING, + +enum idpf_vport_vc_state { + IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM) +}; + +extern const char * const idpf_vport_vc_state_str[]; + +/** + * enum idpf_vport_reset_cause - Vport soft reset causes + * @IDPF_SR_Q_CHANGE: Soft reset queue change + * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change + * @IDPF_SR_MTU_CHANGE: Soft reset MTU change + * @IDPF_SR_RSC_CHANGE: Soft reset RSC change + */ +enum idpf_vport_reset_cause { + IDPF_SR_Q_CHANGE, + IDPF_SR_Q_DESC_CHANGE, + IDPF_SR_MTU_CHANGE, + IDPF_SR_RSC_CHANGE, +}; + +/** + * enum idpf_vport_flags - Vport flags + * @IDPF_VPORT_DEL_QUEUES: To send delete queues message + * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets + * processing is done + * @IDPF_VPORT_FLAGS_NBITS: Must be last + */ +enum idpf_vport_flags { + IDPF_VPORT_DEL_QUEUES, + IDPF_VPORT_SW_MARKER, + IDPF_VPORT_FLAGS_NBITS, +}; + +struct idpf_port_stats { + struct u64_stats_sync stats_sync; + u64_stats_t rx_hw_csum_err; + u64_stats_t rx_hsplit; + u64_stats_t rx_hsplit_hbo; + u64_stats_t rx_bad_descs; + u64_stats_t tx_linearize; + u64_stats_t tx_busy; + u64_stats_t tx_drops; + u64_stats_t tx_dma_map_errs; + struct virtchnl2_vport_stats vport_stats; +}; + +/** + * struct idpf_vport - Handle for netdevices and queue resources + * @num_txq: Number of allocated TX queues + * @num_complq: Number of allocated completion queues + * @txq_desc_count: TX queue descriptor count + * @complq_desc_count: Completion queue descriptor count + * @compln_clean_budget: Work budget for completion clean + * @num_txq_grp: Number of TX queue groups + * @txq_grps: Array of TX queue groups + * @txq_model: Split queue or single queue queuing model + * @txqs: Used only in hotpath to get to the right queue very fast + * @crc_enable: Enable CRC insertion offload + * @num_rxq: 
Number of allocated RX queues + * @num_bufq: Number of allocated buffer queues + * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors + * to complete all buffer descriptors for all buffer queues in + * the worst case. + * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping + * @bufq_desc_count: Buffer queue descriptor count + * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc) + * @num_rxq_grp: Number of RX queues in a group + * @rxq_grps: Total number of RX groups. Number of groups * number of RX per + * group will yield total number of RX queues. + * @rxq_model: Splitq queue or single queue queuing model + * @rx_ptype_lkup: Lookup table for ptypes on RX + * @adapter: back pointer to associated adapter + * @netdev: Associated net_device. Each vport should have one and only one + * associated netdev. + * @flags: See enum idpf_vport_flags + * @vport_type: Default SRIOV, SIOV, etc. + * @vport_id: Device given vport identifier + * @idx: Software index in adapter vports struct + * @default_vport: Use this vport if one isn't specified + * @base_rxd: True if the driver should use base descriptors instead of flex + * @num_q_vectors: Number of IRQ vectors allocated + * @q_vectors: Array of queue vectors + * @q_vector_idxs: Starting index of queue vectors + * @max_mtu: device given max possible MTU + * @default_mac_addr: device will give a default MAC to use + * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation + * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation + * @port_stats: per port csum, header split, and other offload stats + * @link_up: True if link is up + * @link_speed_mbps: Link speed in mbps + * @vc_msg: Virtchnl message buffer + * @vc_state: Virtchnl message state + * @vchnl_wq: Wait queue for virtchnl messages + * @sw_marker_wq: workqueue for marker packets + * @vc_buf_lock: Lock to protect virtchnl buffer + */ +struct idpf_vport { + u16 num_txq; + u16 num_complq; + u32 txq_desc_count; + u32 complq_desc_count; + u32 compln_clean_budget; + u16 num_txq_grp; + struct idpf_txq_group *txq_grps; + u32 txq_model; + struct idpf_queue **txqs; + bool crc_enable; + + u16 num_rxq; + u16 num_bufq; + u32 rxq_desc_count; + u8 num_bufqs_per_qgrp; + u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; + u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP]; + u16 num_rxq_grp; + struct idpf_rxq_group *rxq_grps; + u32 rxq_model; + struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE]; + + struct idpf_adapter *adapter; + struct net_device *netdev; + DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS); + u16 vport_type; + u32 vport_id; + u16 idx; + bool default_vport; + bool base_rxd; + + u16 num_q_vectors; + struct idpf_q_vector *q_vectors; + u16 *q_vector_idxs; + u16 max_mtu; + u8 default_mac_addr[ETH_ALEN]; + u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS]; + u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS]; + struct idpf_port_stats port_stats; + + bool link_up; + u32 link_speed_mbps; + + char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; + DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); + + wait_queue_head_t vchnl_wq; + wait_queue_head_t sw_marker_wq; + struct mutex vc_buf_lock; +}; + +/** + * enum idpf_user_flags + * @__IDPF_PROMISC_UC: Unicast promiscuous mode + * @__IDPF_PROMISC_MC: Multicast promiscuous mode + * @__IDPF_USER_FLAGS_NBITS: Must be last + */ +enum idpf_user_flags { + __IDPF_PROMISC_UC = 32, + __IDPF_PROMISC_MC, + + __IDPF_USER_FLAGS_NBITS, +}; + +/** + * struct idpf_rss_data - Associated RSS data + * @rss_key_size: Size of RSS hash key + * @rss_key: RSS hash 
key + * @rss_lut_size: Size of RSS lookup table + * @rss_lut: RSS lookup table + * @cached_lut: Used to restore previously init RSS lut + */ +struct idpf_rss_data { + u16 rss_key_size; + u8 *rss_key; + u16 rss_lut_size; + u32 *rss_lut; + u32 *cached_lut; +}; + +/** + * struct idpf_vport_user_config_data - User defined configuration values for + * each vport. + * @rss_data: See struct idpf_rss_data + * @num_req_tx_qs: Number of user requested TX queues through ethtool + * @num_req_rx_qs: Number of user requested RX queues through ethtool + * @num_req_txq_desc: Number of user requested TX queue descriptors through + * ethtool + * @num_req_rxq_desc: Number of user requested RX queue descriptors through + * ethtool + * @user_flags: User toggled config flags + * @mac_filter_list: List of MAC filters + * + * Used to restore configuration after a reset as the vport will get wiped. + */ +struct idpf_vport_user_config_data { + struct idpf_rss_data rss_data; + u16 num_req_tx_qs; + u16 num_req_rx_qs; + u32 num_req_txq_desc; + u32 num_req_rxq_desc; + DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); + struct list_head mac_filter_list; +}; + +/** + * enum idpf_vport_config_flags - Vport config flags + * @IDPF_VPORT_REG_NETDEV: Register netdev + * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset + * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight + * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight + * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last + */ +enum idpf_vport_config_flags { + IDPF_VPORT_REG_NETDEV, + IDPF_VPORT_UP_REQUESTED, + IDPF_VPORT_ADD_MAC_REQ, + IDPF_VPORT_DEL_MAC_REQ, + IDPF_VPORT_CONFIG_FLAGS_NBITS, +}; + +/** + * struct idpf_avail_queue_info + * @avail_rxq: Available RX queues + * @avail_txq: Available TX queues + * @avail_bufq: Available buffer queues + * @avail_complq: Available completion queues + * + * Maintain total queues available after allocating max queues to each vport. + */ +struct idpf_avail_queue_info { + u16 avail_rxq; + u16 avail_txq; + u16 avail_bufq; + u16 avail_complq; +}; + +/** + * struct idpf_vector_info - Utility structure to pass function arguments as a + * structure + * @num_req_vecs: Vectors required based on the number of queues updated by the + * user via ethtool + * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs + * @index: Relative starting index for vectors + * @default_vport: Vectors are for default vport + */ +struct idpf_vector_info { + u16 num_req_vecs; + u16 num_curr_vecs; + u16 index; + bool default_vport; +}; + +/** + * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector + * distribution algorithm + * @top: Points to stack top i.e. next available vector index + * @base: Always points to start of the free pool + * @size: Total size of the vector stack + * @vec_idx: Array to store all the vector indexes + * + * Vector stack maintains all the relative vector indexes at the *adapter* + * level. This stack is divided into 2 parts, first one is called as 'default + * pool' and other one is called 'free pool'. Vector distribution algorithm + * gives priority to default vports in a way that at least IDPF_MIN_Q_VEC + * vectors are allocated per default vport and the relative vector indexes for + * those are maintained in default pool. Free pool contains all the unallocated + * vector indexes which can be allocated on-demand basis. Mailbox vector index + * is maintained in the default pool of the stack. 
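+ *
+ * One way to picture the layout (illustrative only):
+ *
+ *	vec_idx[0 .. base - 1]     default pool: the mailbox vector plus the
+ *	                           minimum vectors reserved for default vports
+ *	vec_idx[base .. size - 1]  free pool: unallocated indexes handed out
+ *	                           on demand, with @top tracking the next
+ *	                           available slot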
+ */ +struct idpf_vector_lifo { + u16 top; + u16 base; + u16 size; + u16 *vec_idx; +}; + +/** + * struct idpf_vport_config - Vport configuration data + * @user_config: see struct idpf_vport_user_config_data + * @max_q: Maximum possible queues + * @req_qs_chunks: Queue chunk data for requested queues + * @mac_filter_list_lock: Lock to protect mac filters + * @flags: See enum idpf_vport_config_flags + */ +struct idpf_vport_config { + struct idpf_vport_user_config_data user_config; + struct idpf_vport_max_q max_q; + void *req_qs_chunks; + spinlock_t mac_filter_list_lock; + DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS); +}; + +/** + * struct idpf_adapter - Device data struct generated on probe + * @pdev: PCI device struct given on probe + * @virt_ver_maj: Virtchnl version major + * @virt_ver_min: Virtchnl version minor + * @msg_enable: Debug message level enabled + * @mb_wait_count: Number of times mailbox was attempted initialization + * @state: Init state machine + * @flags: See enum idpf_flags + * @reset_reg: See struct idpf_reset_reg + * @hw: Device access data + * @num_req_msix: Requested number of MSIX vectors + * @num_avail_msix: Available number of MSIX vectors + * @num_msix_entries: Number of entries in MSIX table + * @msix_entries: MSIX table + * @req_vec_chunks: Requested vector chunk data + * @mb_vector: Mailbox vector data + * @vector_stack: Stack to store the msix vector indexes + * @irq_mb_handler: Handler for hard interrupt for mailbox + * @tx_timeout_count: Number of TX timeouts that have occurred + * @avail_queues: Device given queue limits + * @vports: Array to store vports created by the driver + * @netdevs: Associated Vport netdevs + * @vport_params_reqd: Vport params requested + * @vport_params_recvd: Vport params received + * @vport_ids: Array of device given vport identifiers + * @vport_config: Vport config parameters + * @max_vports: Maximum vports that can be allocated + * @num_alloc_vports: Current number of vports allocated + * @next_vport: Next free slot in pf->vport[] - 0-based! + * @init_task: Initialization task + * @init_wq: Workqueue for initialization task + * @serv_task: Periodically recurring maintenance task + * @serv_wq: Workqueue for service task + * @mbx_task: Task to handle mailbox interrupts + * @mbx_wq: Workqueue for mailbox responses + * @vc_event_task: Task to handle out of band virtchnl event notifications + * @vc_event_wq: Workqueue for virtchnl events + * @stats_task: Periodic statistics retrieval task + * @stats_wq: Workqueue for statistics task + * @caps: Negotiated capabilities with device + * @vchnl_wq: Wait queue for virtchnl messages + * @vc_state: Virtchnl message state + * @vc_msg: Virtchnl message buffer + * @dev_ops: See idpf_dev_ops + * @num_vfs: Number of allocated VFs through sysfs. 
PF does not directly talk + * to VFs but is used to initialize them + * @crc_enable: Enable CRC insertion offload + * @req_tx_splitq: TX split or single queue model to request + * @req_rx_splitq: RX split or single queue model to request + * @vport_ctrl_lock: Lock to protect the vport control flow + * @vector_lock: Lock to protect vector distribution + * @queue_lock: Lock to protect queue distribution + * @vc_buf_lock: Lock to protect virtchnl buffer + */ +struct idpf_adapter { + struct pci_dev *pdev; + u32 virt_ver_maj; + u32 virt_ver_min; + + u32 msg_enable; + u32 mb_wait_count; + enum idpf_state state; + DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS); + struct idpf_reset_reg reset_reg; + struct idpf_hw hw; + u16 num_req_msix; + u16 num_avail_msix; + u16 num_msix_entries; + struct msix_entry *msix_entries; + struct virtchnl2_alloc_vectors *req_vec_chunks; + struct idpf_q_vector mb_vector; + struct idpf_vector_lifo vector_stack; + irqreturn_t (*irq_mb_handler)(int irq, void *data); + + u32 tx_timeout_count; + struct idpf_avail_queue_info avail_queues; + struct idpf_vport **vports; + struct net_device **netdevs; + struct virtchnl2_create_vport **vport_params_reqd; + struct virtchnl2_create_vport **vport_params_recvd; + u32 *vport_ids; + + struct idpf_vport_config **vport_config; + u16 max_vports; + u16 num_alloc_vports; + u16 next_vport; + + struct delayed_work init_task; + struct workqueue_struct *init_wq; + struct delayed_work serv_task; + struct workqueue_struct *serv_wq; + struct delayed_work mbx_task; + struct workqueue_struct *mbx_wq; + struct delayed_work vc_event_task; + struct workqueue_struct *vc_event_wq; + struct delayed_work stats_task; + struct workqueue_struct *stats_wq; + struct virtchnl2_get_capabilities caps; + + wait_queue_head_t vchnl_wq; + DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); + char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; + struct idpf_dev_ops dev_ops; + int num_vfs; + bool crc_enable; + bool req_tx_splitq; + bool req_rx_splitq; + + struct mutex vport_ctrl_lock; + struct mutex vector_lock; + struct mutex queue_lock; + struct mutex vc_buf_lock; +}; + +/** + * idpf_is_queue_model_split - check if queue model is split + * @q_model: queue model single or split + * + * Returns true if queue model is split else false + */ +static inline int idpf_is_queue_model_split(u16 q_model) +{ + return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; +} + +#define idpf_is_cap_ena(adapter, field, flag) \ + idpf_is_capability_ena(adapter, false, field, flag) +#define idpf_is_cap_ena_all(adapter, field, flag) \ + idpf_is_capability_ena(adapter, true, field, flag) + +bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, + enum idpf_cap_field field, u64 flag); + +#define IDPF_CAP_RSS (\ + VIRTCHNL2_CAP_RSS_IPV4_TCP |\ + VIRTCHNL2_CAP_RSS_IPV4_TCP |\ + VIRTCHNL2_CAP_RSS_IPV4_UDP |\ + VIRTCHNL2_CAP_RSS_IPV4_SCTP |\ + VIRTCHNL2_CAP_RSS_IPV4_OTHER |\ + VIRTCHNL2_CAP_RSS_IPV6_TCP |\ + VIRTCHNL2_CAP_RSS_IPV6_TCP |\ + VIRTCHNL2_CAP_RSS_IPV6_UDP |\ + VIRTCHNL2_CAP_RSS_IPV6_SCTP |\ + VIRTCHNL2_CAP_RSS_IPV6_OTHER) + +#define IDPF_CAP_RSC (\ + VIRTCHNL2_CAP_RSC_IPV4_TCP |\ + VIRTCHNL2_CAP_RSC_IPV6_TCP) + +#define IDPF_CAP_HSPLIT (\ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) + +#define IDPF_CAP_RX_CSUM_L4V4 (\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) + +#define IDPF_CAP_RX_CSUM_L4V6 (\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) + +#define IDPF_CAP_RX_CSUM (\ + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ + 
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) + +#define IDPF_CAP_SCTP_CSUM (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) + +#define IDPF_CAP_TUNNEL_TX_CSUM (\ + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\ + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL) + +/** + * idpf_get_reserved_vecs - Get reserved vectors + * @adapter: private data struct + */ +static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.num_allocated_vectors); +} + +/** + * idpf_get_default_vports - Get default number of vports + * @adapter: private data struct + */ +static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.default_num_vports); +} + +/** + * idpf_get_max_vports - Get max number of vports + * @adapter: private data struct + */ +static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_vports); +} + +/** + * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device + * @adapter: private data struct + */ +static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter) +{ + return adapter->caps.max_sg_bufs_per_tx_pkt; +} + +/** + * idpf_get_min_tx_pkt_len - Get min packet length supported by the device + * @adapter: private data struct + */ +static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) +{ + u8 pkt_len = adapter->caps.min_sso_packet_len; + + return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN; +} + +/** + * idpf_get_reg_addr - Get BAR0 register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Based on the register offset, return the actual BAR0 register address + */ +static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + return (void __iomem *)(adapter->hw.hw_addr + reg_offset); +} + +/** + * idpf_is_reset_detected - check if we were reset at some point + * @adapter: driver specific private structure + * + * Returns true if we are either in reset currently or were previously reset. 
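+ *
+ * The check below treats a missing ARQ, or an ARQ length register whose
+ * length field reads back as zero, as "reset detected"; the length field is
+ * programmed when the mailbox is (re)initialized, so losing it implies the
+ * function was reset underneath the driver.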
+ */ +static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter) +{ + if (!adapter->hw.arq) + return true; + + return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) & + adapter->hw.arq->reg.len_mask); +} + +/** + * idpf_is_reset_in_prog - check if reset is in progress + * @adapter: driver specific private structure + * + * Returns true if hard reset is in progress, false otherwise + */ +static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter) +{ + return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) || + test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || + test_bit(IDPF_HR_DRV_LOAD, adapter->flags)); +} + +/** + * idpf_netdev_to_vport - get a vport handle from a netdev + * @netdev: network interface device structure + */ +static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return np->vport; +} + +/** + * idpf_netdev_to_adapter - Get adapter handle from a netdev + * @netdev: Network interface device structure + */ +static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return np->adapter; +} + +/** + * idpf_is_feature_ena - Determine if a particular feature is enabled + * @vport: Vport to check + * @feature: Netdev flag to check + * + * Returns true or false if a particular feature is enabled. + */ +static inline bool idpf_is_feature_ena(const struct idpf_vport *vport, + netdev_features_t feature) +{ + return vport->netdev->features & feature; +} + +/** + * idpf_get_max_tx_hdr_size -- get the size of tx header + * @adapter: Driver specific private structure + */ +static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_tx_hdr_size); +} + +/** + * idpf_vport_ctrl_lock - Acquire the vport control lock + * @netdev: Network interface device structure + * + * This lock should be used by non-datapath code to protect against vport + * destruction. 
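+ *
+ * A typical non-datapath caller (see the ethtool handlers later in this
+ * patch) brackets the whole operation, for example:
+ *
+ *	idpf_vport_ctrl_lock(netdev);
+ *	vport = idpf_netdev_to_vport(netdev);
+ *	... operate on the vport ...
+ *	idpf_vport_ctrl_unlock(netdev);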
+ */ +static inline void idpf_vport_ctrl_lock(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + mutex_lock(&np->adapter->vport_ctrl_lock); +} + +/** + * idpf_vport_ctrl_unlock - Release the vport control lock + * @netdev: Network interface device structure + */ +static inline void idpf_vport_ctrl_unlock(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + mutex_unlock(&np->adapter->vport_ctrl_lock); +} + +void idpf_statistics_task(struct work_struct *work); +void idpf_init_task(struct work_struct *work); +void idpf_service_task(struct work_struct *work); +void idpf_mbx_task(struct work_struct *work); +void idpf_vc_event_task(struct work_struct *work); +void idpf_dev_ops_init(struct idpf_adapter *adapter); +void idpf_vf_dev_ops_init(struct idpf_adapter *adapter); +int idpf_vport_adjust_qs(struct idpf_vport *vport); +int idpf_init_dflt_mbx(struct idpf_adapter *adapter); +void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); +int idpf_vc_core_init(struct idpf_adapter *adapter); +void idpf_vc_core_deinit(struct idpf_adapter *adapter); +int idpf_intr_req(struct idpf_adapter *adapter); +void idpf_intr_rel(struct idpf_adapter *adapter); +int idpf_get_reg_intr_vecs(struct idpf_vport *vport, + struct idpf_vec_regs *reg_vals); +u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter); +int idpf_send_delete_queues_msg(struct idpf_vport *vport); +int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, + u16 num_complq, u16 num_rx_q, u16 num_rx_bufq); +int idpf_initiate_soft_reset(struct idpf_vport *vport, + enum idpf_vport_reset_cause reset_cause); +int idpf_send_enable_vport_msg(struct idpf_vport *vport); +int idpf_send_disable_vport_msg(struct idpf_vport *vport); +int idpf_send_destroy_vport_msg(struct idpf_vport *vport); +int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport); +int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport); +int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); +int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); +int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter); +int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors); +void idpf_deinit_task(struct idpf_adapter *adapter); +int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, + u16 *q_vector_idxs, + struct idpf_vector_info *vec_info); +int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport); +int idpf_send_get_stats_msg(struct idpf_vport *vport); +int idpf_get_vec_ids(struct idpf_adapter *adapter, + u16 *vecids, int num_vecids, + struct virtchnl2_vector_chunks *chunks); +int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, + void *msg, int msg_size); +int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, + u16 msg_size, u8 *msg); +void idpf_set_ethtool_ops(struct net_device *netdev); +int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_add_del_mac_filters(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + bool add, bool async); +int idpf_set_promiscuous(struct idpf_adapter *adapter, + struct idpf_vport_user_config_data *config_data, + u32 vport_id); +int idpf_send_disable_queues_msg(struct idpf_vport *vport); +void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q); +u32 idpf_get_vport_id(struct idpf_vport *vport); +int 
idpf_vport_queue_ids_init(struct idpf_vport *vport); +int idpf_queue_reg_init(struct idpf_vport *vport); +int idpf_send_config_queues_msg(struct idpf_vport *vport); +int idpf_send_enable_queues_msg(struct idpf_vport *vport); +int idpf_send_create_vport_msg(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_check_supported_desc_ids(struct idpf_vport *vport); +void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, + u16 itr, bool tx); +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); +int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); +int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); + +#endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c new file mode 100644 index 0000000000..c7f43d2fcd --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf_controlq.h" + +/** + * idpf_ctlq_setup_regs - initialize control queue registers + * @cq: pointer to the specific control queue + * @q_create_info: structs containing info for each queue to be initialized + */ +static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq, + struct idpf_ctlq_create_info *q_create_info) +{ + /* set control queue registers in our local struct */ + cq->reg.head = q_create_info->reg.head; + cq->reg.tail = q_create_info->reg.tail; + cq->reg.len = q_create_info->reg.len; + cq->reg.bah = q_create_info->reg.bah; + cq->reg.bal = q_create_info->reg.bal; + cq->reg.len_mask = q_create_info->reg.len_mask; + cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask; + cq->reg.head_mask = q_create_info->reg.head_mask; +} + +/** + * idpf_ctlq_init_regs - Initialize control queue registers + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * @is_rxq: true if receive control queue, false otherwise + * + * Initialize registers. The caller is expected to have already initialized the + * descriptor ring memory and buffer memory + */ +static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + bool is_rxq) +{ + /* Update tail to post pre-allocated buffers for rx queues */ + if (is_rxq) + wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); + + /* For non-Mailbox control queues only TAIL need to be set */ + if (cq->q_id != -1) + return; + + /* Clear Head for both send or receive */ + wr32(hw, cq->reg.head, 0); + + /* set starting point */ + wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); + wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); + wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); +} + +/** + * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf + * @cq: pointer to the specific Control queue + * + * Record the address of the receive queue DMA buffers in the descriptors. + * The buffers must have been previously allocated. 
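+ *
+ * (The buffers are allocated beforehand by idpf_ctlq_alloc_bufs() as part
+ * of idpf_ctlq_alloc_ring_res(); see idpf_controlq_setup.c later in this
+ * patch.)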
+ */ +static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) +{ + int i; + + for (i = 0; i < cq->ring_size; i++) { + struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i); + struct idpf_dma_mem *bi = cq->bi.rx_buff[i]; + + /* No buffer to post to descriptor, continue */ + if (!bi) + continue; + + desc->flags = + cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); + desc->opcode = 0; + desc->datalen = cpu_to_le16(bi->size); + desc->ret_val = 0; + desc->v_opcode_dtype = 0; + desc->v_retval = 0; + desc->params.indirect.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.indirect.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.indirect.param0 = 0; + desc->params.indirect.sw_cookie = 0; + desc->params.indirect.v_flags = 0; + } +} + +/** + * idpf_ctlq_shutdown - shutdown the CQ + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for any controq queue + */ +static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + mutex_lock(&cq->cq_lock); + + /* free ring buffers and the ring itself */ + idpf_ctlq_dealloc_ring_res(hw, cq); + + /* Set ring_size to 0 to indicate uninitialized queue */ + cq->ring_size = 0; + + mutex_unlock(&cq->cq_lock); + mutex_destroy(&cq->cq_lock); +} + +/** + * idpf_ctlq_add - add one control queue + * @hw: pointer to hardware struct + * @qinfo: info for queue to be created + * @cq_out: (output) double pointer to control queue to be created + * + * Allocate and initialize a control queue and add it to the control queue list. + * The cq parameter will be allocated/initialized and passed back to the caller + * if no errors occur. + * + * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add + */ +int idpf_ctlq_add(struct idpf_hw *hw, + struct idpf_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq_out) +{ + struct idpf_ctlq_info *cq; + bool is_rxq = false; + int err; + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return -ENOMEM; + + cq->cq_type = qinfo->type; + cq->q_id = qinfo->id; + cq->buf_size = qinfo->buf_size; + cq->ring_size = qinfo->len; + + cq->next_to_use = 0; + cq->next_to_clean = 0; + cq->next_to_post = cq->ring_size - 1; + + switch (qinfo->type) { + case IDPF_CTLQ_TYPE_MAILBOX_RX: + is_rxq = true; + fallthrough; + case IDPF_CTLQ_TYPE_MAILBOX_TX: + err = idpf_ctlq_alloc_ring_res(hw, cq); + break; + default: + err = -EBADR; + break; + } + + if (err) + goto init_free_q; + + if (is_rxq) { + idpf_ctlq_init_rxq_bufs(cq); + } else { + /* Allocate the array of msg pointers for TX queues */ + cq->bi.tx_msg = kcalloc(qinfo->len, + sizeof(struct idpf_ctlq_msg *), + GFP_KERNEL); + if (!cq->bi.tx_msg) { + err = -ENOMEM; + goto init_dealloc_q_mem; + } + } + + idpf_ctlq_setup_regs(cq, qinfo); + + idpf_ctlq_init_regs(hw, cq, is_rxq); + + mutex_init(&cq->cq_lock); + + list_add(&cq->cq_list, &hw->cq_list_head); + + *cq_out = cq; + + return 0; + +init_dealloc_q_mem: + /* free ring buffers and the ring itself */ + idpf_ctlq_dealloc_ring_res(hw, cq); +init_free_q: + kfree(cq); + + return err; +} + +/** + * idpf_ctlq_remove - deallocate and remove specified control queue + * @hw: pointer to hardware struct + * @cq: pointer to control queue to be removed + */ +void idpf_ctlq_remove(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + list_del(&cq->cq_list); + idpf_ctlq_shutdown(hw, cq); + kfree(cq); +} + +/** + * idpf_ctlq_init - main initialization routine for all control queues + * @hw: pointer to hardware struct + * @num_q: number of 
queues to initialize + * @q_info: array of structs containing info for each queue to be initialized + * + * This initializes any number and any type of control queues. This is an all + * or nothing routine; if one fails, all previously allocated queues will be + * destroyed. This must be called prior to using the individual add/remove + * APIs. + */ +int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q, + struct idpf_ctlq_create_info *q_info) +{ + struct idpf_ctlq_info *cq, *tmp; + int err; + int i; + + INIT_LIST_HEAD(&hw->cq_list_head); + + for (i = 0; i < num_q; i++) { + struct idpf_ctlq_create_info *qinfo = q_info + i; + + err = idpf_ctlq_add(hw, qinfo, &cq); + if (err) + goto init_destroy_qs; + } + + return 0; + +init_destroy_qs: + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + idpf_ctlq_remove(hw, cq); + + return err; +} + +/** + * idpf_ctlq_deinit - destroy all control queues + * @hw: pointer to hw struct + */ +void idpf_ctlq_deinit(struct idpf_hw *hw) +{ + struct idpf_ctlq_info *cq, *tmp; + + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + idpf_ctlq_remove(hw, cq); +} + +/** + * idpf_ctlq_send - send command to Control Queue (CTQ) + * @hw: pointer to hw struct + * @cq: handle to control queue struct to send on + * @num_q_msg: number of messages to send on control queue + * @q_msg: pointer to array of queue messages to be sent + * + * The caller is expected to allocate DMAable buffers and pass them to the + * send routine via the q_msg struct / control queue specific data struct. + * The control queue will hold a reference to each send message until + * the completion for that message has been cleaned. + */ +int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_desc *desc; + int num_desc_avail; + int err = 0; + int i; + + mutex_lock(&cq->cq_lock); + + /* Ensure there are enough descriptors to send all messages */ + num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); + if (num_desc_avail == 0 || num_desc_avail < num_q_msg) { + err = -ENOSPC; + goto err_unlock; + } + + for (i = 0; i < num_q_msg; i++) { + struct idpf_ctlq_msg *msg = &q_msg[i]; + + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); + + desc->opcode = cpu_to_le16(msg->opcode); + desc->pfid_vfid = cpu_to_le16(msg->func_id); + + desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode); + desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval); + + desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) << + IDPF_CTLQ_FLAG_HOST_ID_S); + if (msg->data_len) { + struct idpf_dma_mem *buff = msg->ctx.indirect.payload; + + desc->datalen |= cpu_to_le16(msg->data_len); + desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF); + desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD); + + /* Update the address values in the desc with the pa + * value for respective buffer + */ + desc->params.indirect.addr_high = + cpu_to_le32(upper_32_bits(buff->pa)); + desc->params.indirect.addr_low = + cpu_to_le32(lower_32_bits(buff->pa)); + + memcpy(&desc->params, msg->ctx.indirect.context, + IDPF_INDIRECT_CTX_SIZE); + } else { + memcpy(&desc->params, msg->ctx.direct, + IDPF_DIRECT_CTX_SIZE); + } + + /* Store buffer info */ + cq->bi.tx_msg[cq->next_to_use] = msg; + + (cq->next_to_use)++; + if (cq->next_to_use == cq->ring_size) + cq->next_to_use = 0; + } + + /* Force memory write to complete before letting hardware + * know that there are new descriptors to fetch. 
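+	 * The tail write below publishes every descriptor filled in this
+	 * call; cq_lock is still held, so next_to_use cannot move until the
+	 * doorbell has been rung.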
+ */ + dma_wmb(); + + wr32(hw, cq->reg.tail, cq->next_to_use); + +err_unlock: + mutex_unlock(&cq->cq_lock); + + return err; +} + +/** + * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the + * requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. + */ +int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + struct idpf_ctlq_desc *desc; + u16 i, num_to_clean; + u16 ntc, desc_err; + + if (*clean_count == 0) + return 0; + if (*clean_count > cq->ring_size) + return -EBADR; + + mutex_lock(&cq->cq_lock); + + ntc = cq->next_to_clean; + + num_to_clean = *clean_count; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD)) + break; + + /* strip off FW internal code */ + desc_err = le16_to_cpu(desc->ret_val) & 0xff; + + msg_status[i] = cq->bi.tx_msg[ntc]; + msg_status[i]->status = desc_err; + + cq->bi.tx_msg[ntc] = NULL; + + /* Zero out any stale data */ + memset(desc, 0, sizeof(*desc)); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + + mutex_unlock(&cq->cq_lock); + + /* Return number of descriptors actually cleaned */ + *clean_count = i; + + return 0; +} + +/** + * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring + * @hw: pointer to hw struct + * @cq: pointer to control queue handle + * @buff_count: (input|output) input is number of buffers caller is trying to + * return; output is number of buffers that were not posted + * @buffs: array of pointers to dma mem structs to be given to hardware + * + * Caller uses this function to return DMA buffers to the descriptor ring after + * consuming them; buff_count will be the number of buffers. + * + * Note: this function needs to be called after a receive call even + * if there are no DMA buffers to be returned, i.e. 
buff_count = 0, + * buffs = NULL to support direct commands + */ +int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 *buff_count, struct idpf_dma_mem **buffs) +{ + struct idpf_ctlq_desc *desc; + u16 ntp = cq->next_to_post; + bool buffs_avail = false; + u16 tbp = ntp + 1; + int i = 0; + + if (*buff_count > cq->ring_size) + return -EBADR; + + if (*buff_count > 0) + buffs_avail = true; + + mutex_lock(&cq->cq_lock); + + if (tbp >= cq->ring_size) + tbp = 0; + + if (tbp == cq->next_to_clean) + /* Nothing to do */ + goto post_buffs_out; + + /* Post buffers for as many as provided or up until the last one used */ + while (ntp != cq->next_to_clean) { + desc = IDPF_CTLQ_DESC(cq, ntp); + + if (cq->bi.rx_buff[ntp]) + goto fill_desc; + if (!buffs_avail) { + /* If the caller hasn't given us any buffers or + * there are none left, search the ring itself + * for an available buffer to move to this + * entry starting at the next entry in the ring + */ + tbp = ntp + 1; + + /* Wrap ring if necessary */ + if (tbp >= cq->ring_size) + tbp = 0; + + while (tbp != cq->next_to_clean) { + if (cq->bi.rx_buff[tbp]) { + cq->bi.rx_buff[ntp] = + cq->bi.rx_buff[tbp]; + cq->bi.rx_buff[tbp] = NULL; + + /* Found a buffer, no need to + * search anymore + */ + break; + } + + /* Wrap ring if necessary */ + tbp++; + if (tbp >= cq->ring_size) + tbp = 0; + } + + if (tbp == cq->next_to_clean) + goto post_buffs_out; + } else { + /* Give back pointer to DMA buffer */ + cq->bi.rx_buff[ntp] = buffs[i]; + i++; + + if (i >= *buff_count) + buffs_avail = false; + } + +fill_desc: + desc->flags = + cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); + + /* Post buffers to descriptor */ + desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size); + desc->params.indirect.addr_high = + cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa)); + desc->params.indirect.addr_low = + cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa)); + + ntp++; + if (ntp == cq->ring_size) + ntp = 0; + } + +post_buffs_out: + /* Only update tail if buffers were actually posted */ + if (cq->next_to_post != ntp) { + if (ntp) + /* Update next_to_post to ntp - 1 since current ntp + * will not have a buffer + */ + cq->next_to_post = ntp - 1; + else + /* Wrap to end of end ring since current ntp is 0 */ + cq->next_to_post = cq->ring_size - 1; + + wr32(hw, cq->reg.tail, cq->next_to_post); + } + + mutex_unlock(&cq->cq_lock); + + /* return the number of buffers that were not posted */ + *buff_count = *buff_count - i; + + return 0; +} + +/** + * idpf_ctlq_recv - receive control queue message call back + * @cq: pointer to control queue handle to receive on + * @num_q_msg: (input|output) input number of messages that should be received; + * output number of messages actually received + * @q_msg: (output) array of received control queue messages on this q; + * needs to be pre-allocated by caller for as many messages as requested + * + * Called by interrupt handler or polling mechanism. 
Caller is expected + * to free buffers + */ +int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg *q_msg) +{ + u16 num_to_clean, ntc, flags; + struct idpf_ctlq_desc *desc; + int err = 0; + u16 i; + + if (*num_q_msg == 0) + return 0; + else if (*num_q_msg > cq->ring_size) + return -EBADR; + + /* take the lock before we start messing with the ring */ + mutex_lock(&cq->cq_lock); + + ntc = cq->next_to_clean; + + num_to_clean = *num_q_msg; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + flags = le16_to_cpu(desc->flags); + + if (!(flags & IDPF_CTLQ_FLAG_DD)) + break; + + q_msg[i].vmvf_type = (flags & + (IDPF_CTLQ_FLAG_FTYPE_VM | + IDPF_CTLQ_FLAG_FTYPE_PF)) >> + IDPF_CTLQ_FLAG_FTYPE_S; + + if (flags & IDPF_CTLQ_FLAG_ERR) + err = -EBADMSG; + + q_msg[i].cookie.mbx.chnl_opcode = + le32_to_cpu(desc->v_opcode_dtype); + q_msg[i].cookie.mbx.chnl_retval = + le32_to_cpu(desc->v_retval); + + q_msg[i].opcode = le16_to_cpu(desc->opcode); + q_msg[i].data_len = le16_to_cpu(desc->datalen); + q_msg[i].status = le16_to_cpu(desc->ret_val); + + if (desc->datalen) { + memcpy(q_msg[i].ctx.indirect.context, + &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE); + + /* Assign pointer to dma buffer to ctlq_msg array + * to be given to upper layer + */ + q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc]; + + /* Zero out pointer to DMA buffer info; + * will be repopulated by post buffers API + */ + cq->bi.rx_buff[ntc] = NULL; + } else { + memcpy(q_msg[i].ctx.direct, desc->params.raw, + IDPF_DIRECT_CTX_SIZE); + } + + /* Zero out stale data in descriptor */ + memset(desc, 0, sizeof(struct idpf_ctlq_desc)); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + + mutex_unlock(&cq->cq_lock); + + *num_q_msg = i; + if (*num_q_msg == 0) + err = -ENOMSG; + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h new file mode 100644 index 0000000000..c1aba09e98 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_CONTROLQ_H_ +#define _IDPF_CONTROLQ_H_ + +#include + +#include "idpf_controlq_api.h" + +/* Maximum buffer length for all control queue types */ +#define IDPF_CTLQ_MAX_BUF_LEN 4096 + +#define IDPF_CTLQ_DESC(R, i) \ + (&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i])) + +#define IDPF_CTLQ_DESC_UNUSED(R) \ + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->ring_size) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) + +/* Control Queue default settings */ +#define IDPF_CTRL_SQ_CMD_TIMEOUT 250 /* msecs */ + +struct idpf_ctlq_desc { + /* Control queue descriptor flags */ + __le16 flags; + /* Control queue message opcode */ + __le16 opcode; + __le16 datalen; /* 0 for direct commands */ + union { + __le16 ret_val; + __le16 pfid_vfid; +#define IDPF_CTLQ_DESC_VF_ID_S 0 +#define IDPF_CTLQ_DESC_VF_ID_M (0x7FF << IDPF_CTLQ_DESC_VF_ID_S) +#define IDPF_CTLQ_DESC_PF_ID_S 11 +#define IDPF_CTLQ_DESC_PF_ID_M (0x1F << IDPF_CTLQ_DESC_PF_ID_S) + }; + + /* Virtchnl message opcode and virtchnl descriptor type + * v_opcode=[27:0], v_dtype=[31:28] + */ + __le32 v_opcode_dtype; + /* Virtchnl return value */ + __le32 v_retval; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } direct; + struct { + __le32 param0; + __le16 sw_cookie; + /* Virtchnl flags */ + __le16 v_flags; + __le32 addr_high; + __le32 addr_low; + } indirect; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID | + */ +/* command flags and offsets */ +#define IDPF_CTLQ_FLAG_DD_S 0 +#define IDPF_CTLQ_FLAG_CMP_S 1 +#define IDPF_CTLQ_FLAG_ERR_S 2 +#define IDPF_CTLQ_FLAG_FTYPE_S 6 +#define IDPF_CTLQ_FLAG_RD_S 10 +#define IDPF_CTLQ_FLAG_VFC_S 11 +#define IDPF_CTLQ_FLAG_BUF_S 12 +#define IDPF_CTLQ_FLAG_HOST_ID_S 13 + +#define IDPF_CTLQ_FLAG_DD BIT(IDPF_CTLQ_FLAG_DD_S) /* 0x1 */ +#define IDPF_CTLQ_FLAG_CMP BIT(IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */ +#define IDPF_CTLQ_FLAG_ERR BIT(IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */ +#define IDPF_CTLQ_FLAG_FTYPE_VM BIT(IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */ +#define IDPF_CTLQ_FLAG_FTYPE_PF BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */ +#define IDPF_CTLQ_FLAG_RD BIT(IDPF_CTLQ_FLAG_RD_S) /* 0x400 */ +#define IDPF_CTLQ_FLAG_VFC BIT(IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */ +#define IDPF_CTLQ_FLAG_BUF BIT(IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */ + +/* Host ID is a special field that has 3b and not a 1b flag */ +#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S) + +struct idpf_mbxq_desc { + u8 pad[8]; /* CTLQ flags/opcode/len/retval fields */ + u32 chnl_opcode; /* avoid confusion with desc->opcode */ + u32 chnl_retval; /* ditto for desc->retval */ + u32 pf_vf_id; /* used by CP when sending to PF */ +}; + +/* Define the driver hardware struct to replace other control structs as needed + * Align to ctlq_hw_info + */ +struct idpf_hw { + void __iomem *hw_addr; + resource_size_t hw_addr_len; + + struct idpf_adapter *back; + + /* control queue - send and receive */ + struct idpf_ctlq_info *asq; + struct idpf_ctlq_info *arq; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + + struct list_head cq_list_head; +}; + +int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, + struct idpf_ctlq_info *cq); + +void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq); + +/* prototype for functions used for dynamic memory allocation */ +void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, + u64 size); +void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem); +#endif /* _IDPF_CONTROLQ_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h new file mode 100644 index 0000000000..8dee098bbf --- /dev/null +++ 
b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_CONTROLQ_API_H_ +#define _IDPF_CONTROLQ_API_H_ + +#include "idpf_mem.h" + +struct idpf_hw; + +/* Used for queue init, response and events */ +enum idpf_ctlq_type { + IDPF_CTLQ_TYPE_MAILBOX_TX = 0, + IDPF_CTLQ_TYPE_MAILBOX_RX = 1, + IDPF_CTLQ_TYPE_CONFIG_TX = 2, + IDPF_CTLQ_TYPE_CONFIG_RX = 3, + IDPF_CTLQ_TYPE_EVENT_RX = 4, + IDPF_CTLQ_TYPE_RDMA_TX = 5, + IDPF_CTLQ_TYPE_RDMA_RX = 6, + IDPF_CTLQ_TYPE_RDMA_COMPL = 7 +}; + +/* Generic Control Queue Structures */ +struct idpf_ctlq_reg { + /* used for queue tracking */ + u32 head; + u32 tail; + /* Below applies only to default mb (if present) */ + u32 len; + u32 bah; + u32 bal; + u32 len_mask; + u32 len_ena_mask; + u32 head_mask; +}; + +/* Generic queue msg structure */ +struct idpf_ctlq_msg { + u8 vmvf_type; /* represents the source of the message on recv */ +#define IDPF_VMVF_TYPE_VF 0 +#define IDPF_VMVF_TYPE_VM 1 +#define IDPF_VMVF_TYPE_PF 2 + u8 host_id; + /* 3b field used only when sending a message to CP - to be used in + * combination with target func_id to route the message + */ +#define IDPF_HOST_ID_MASK 0x7 + + u16 opcode; + u16 data_len; /* data_len = 0 when no payload is attached */ + union { + u16 func_id; /* when sending a message */ + u16 status; /* when receiving a message */ + }; + union { + struct { + u32 chnl_opcode; + u32 chnl_retval; + } mbx; + } cookie; + union { +#define IDPF_DIRECT_CTX_SIZE 16 +#define IDPF_INDIRECT_CTX_SIZE 8 + /* 16 bytes of context can be provided or 8 bytes of context + * plus the address of a DMA buffer + */ + u8 direct[IDPF_DIRECT_CTX_SIZE]; + struct { + u8 context[IDPF_INDIRECT_CTX_SIZE]; + struct idpf_dma_mem *payload; + } indirect; + } ctx; +}; + +/* Generic queue info structures */ +/* MB, CONFIG and EVENT q do not have extended info */ +struct idpf_ctlq_create_info { + enum idpf_ctlq_type type; + int id; /* absolute queue offset passed as input + * -1 for default mailbox if present + */ + u16 len; /* Queue length passed as input */ + u16 buf_size; /* buffer size passed as input */ + u64 base_address; /* output, HPA of the Queue start */ + struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */ + + int ext_info_size; + void *ext_info; /* Specific to q type */ +}; + +/* Control Queue information */ +struct idpf_ctlq_info { + struct list_head cq_list; + + enum idpf_ctlq_type cq_type; + int q_id; + struct mutex cq_lock; /* control queue lock */ + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_post; /* starting descriptor to post buffers + * to after recev + */ + + struct idpf_dma_mem desc_ring; /* descriptor ring memory + * idpf_dma_mem is defined in OSdep.h + */ + union { + struct idpf_dma_mem **rx_buff; + struct idpf_ctlq_msg **tx_msg; + } bi; + + u16 buf_size; /* queue buffer size */ + u16 ring_size; /* Number of descriptors */ + struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */ +}; + +/** + * enum idpf_mbx_opc - PF/VF mailbox commands + * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP + */ +enum idpf_mbx_opc { + idpf_mbq_opc_send_msg_to_cp = 0x0801, +}; + +/* API supported for control queue management */ +/* Will init all required q including default mb. "q_info" is an array of + * create_info structs equal to the number of control queues to be created. 
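+ *
+ * A minimal setup sketch for the default mailbox pair, using the defaults
+ * from idpf.h (the driver's own path for this is idpf_init_dflt_mbx()):
+ *
+ *	struct idpf_ctlq_create_info qinfo[IDPF_NUM_DFLT_MBX_Q] = {
+ *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = IDPF_DFLT_MBX_ID,
+ *		  .len = IDPF_DFLT_MBX_Q_LEN, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = IDPF_DFLT_MBX_ID,
+ *		  .len = IDPF_DFLT_MBX_Q_LEN, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ *	};
+ *
+ *	adapter->dev_ops.reg_ops.ctlq_reg_init(qinfo);
+ *	err = idpf_ctlq_init(&adapter->hw, IDPF_NUM_DFLT_MBX_Q, qinfo);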
+ */ +int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q, + struct idpf_ctlq_create_info *q_info); + +/* Allocate and initialize a single control queue, which will be added to the + * control queue list; returns a handle to the created control queue + */ +int idpf_ctlq_add(struct idpf_hw *hw, + struct idpf_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq); + +/* Deinitialize and deallocate a single control queue */ +void idpf_ctlq_remove(struct idpf_hw *hw, + struct idpf_ctlq_info *cq); + +/* Sends messages to HW and will also free the buffer*/ +int idpf_ctlq_send(struct idpf_hw *hw, + struct idpf_ctlq_info *cq, + u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]); + +/* Receives messages and called by interrupt handler/polling + * initiated by app/process. Also caller is supposed to free the buffers + */ +int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg *q_msg); + +/* Reclaims send descriptors on HW write back */ +int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]); + +/* Indicate RX buffers are done being processed */ +int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, + struct idpf_ctlq_info *cq, + u16 *buff_count, + struct idpf_dma_mem **buffs); + +/* Will destroy all q including the default mb */ +void idpf_ctlq_deinit(struct idpf_hw *hw); + +#endif /* _IDPF_CONTROLQ_API_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c new file mode 100644 index 0000000000..a942a6385d --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf_controlq.h" + +/** + * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + */ +static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc); + + cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size); + if (!cq->desc_ring.va) + return -ENOMEM; + + return 0; +} + +/** + * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Allocate the buffer head for all control queues, and if it's a receive + * queue, allocate DMA buffers + */ +static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + int i; + + /* Do not allocate DMA buffers for transmit queues */ + if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX) + return 0; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *), + GFP_KERNEL); + if (!cq->bi.rx_buff) + return -ENOMEM; + + /* allocate the mapped buffers (except for the last one) */ + for (i = 0; i < cq->ring_size - 1; i++) { + struct idpf_dma_mem *bi; + int num = 1; /* number of idpf_dma_mem to be allocated */ + + cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem), + GFP_KERNEL); + if (!cq->bi.rx_buff[i]) + goto unwind_alloc_cq_bufs; + + bi = cq->bi.rx_buff[i]; + + bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size); + if (!bi->va) { + /* unwind will not free the failed entry */ + kfree(cq->bi.rx_buff[i]); + goto unwind_alloc_cq_bufs; + } + } + + return 0; + +unwind_alloc_cq_bufs: + /* don't try to free the one 
that failed... */ + i--; + for (; i >= 0; i--) { + idpf_free_dma_mem(hw, cq->bi.rx_buff[i]); + kfree(cq->bi.rx_buff[i]); + } + kfree(cq->bi.rx_buff); + + return -ENOMEM; +} + +/** + * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + */ +static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + idpf_free_dma_mem(hw, &cq->desc_ring); +} + +/** + * idpf_ctlq_free_bufs - Free CQ buffer info elements + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX + * queues. The upper layers are expected to manage freeing of TX DMA buffers + */ +static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + void *bi; + + if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) { + int i; + + /* free DMA buffers for rx queues*/ + for (i = 0; i < cq->ring_size; i++) { + if (cq->bi.rx_buff[i]) { + idpf_free_dma_mem(hw, cq->bi.rx_buff[i]); + kfree(cq->bi.rx_buff[i]); + } + } + + bi = (void *)cq->bi.rx_buff; + } else { + bi = (void *)cq->bi.tx_msg; + } + + /* free the buffer header */ + kfree(bi); +} + +/** + * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Free the memory used by the ring, buffers and other related structures + */ +void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + /* free ring buffers and the ring itself */ + idpf_ctlq_free_bufs(hw, cq); + idpf_ctlq_free_desc_ring(hw, cq); +} + +/** + * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs + * @hw: pointer to hw struct + * @cq: pointer to control queue struct + * + * Do *NOT* hold cq_lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + int err; + + /* allocate the ring memory */ + err = idpf_ctlq_alloc_desc_ring(hw, cq); + if (err) + return err; + + /* allocate buffers in the rings */ + err = idpf_ctlq_alloc_bufs(hw, cq); + if (err) + goto idpf_init_cq_free_ring; + + /* success! 
*/ + return 0; + +idpf_init_cq_free_ring: + idpf_free_dma_mem(hw, &cq->desc_ring); + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c new file mode 100644 index 0000000000..34ad1ac46b --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_lan_pf_regs.h" + +#define IDPF_PF_ITR_IDX_SPACING 0x4 + +/** + * idpf_ctlq_reg_init - initialize default mailbox registers + * @cq: pointer to the array of create control queues + */ +static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +{ + int i; + + for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { + struct idpf_ctlq_create_info *ccq = cq + i; + + switch (ccq->type) { + case IDPF_CTLQ_TYPE_MAILBOX_TX: + /* set head and tail registers in our local struct */ + ccq->reg.head = PF_FW_ATQH; + ccq->reg.tail = PF_FW_ATQT; + ccq->reg.len = PF_FW_ATQLEN; + ccq->reg.bah = PF_FW_ATQBAH; + ccq->reg.bal = PF_FW_ATQBAL; + ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M; + ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; + ccq->reg.head_mask = PF_FW_ATQH_ATQH_M; + break; + case IDPF_CTLQ_TYPE_MAILBOX_RX: + /* set head and tail registers in our local struct */ + ccq->reg.head = PF_FW_ARQH; + ccq->reg.tail = PF_FW_ARQT; + ccq->reg.len = PF_FW_ARQLEN; + ccq->reg.bah = PF_FW_ARQBAH; + ccq->reg.bal = PF_FW_ARQBAL; + ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M; + ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; + ccq->reg.head_mask = PF_FW_ARQH_ARQH_M; + break; + default: + break; + } + } +} + +/** + * idpf_mb_intr_reg_init - Initialize mailbox interrupt register + * @adapter: adapter structure + */ +static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl); + + intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl); + intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M; + intr->dyn_ctl_itridx_m = PF_GLINT_DYN_CTL_ITR_INDX_M; + intr->icr_ena = idpf_get_reg_addr(adapter, PF_INT_DIR_OICR_ENA); + intr->icr_ena_ctlq_m = PF_INT_DIR_OICR_ENA_M; +} + +/** + * idpf_intr_reg_init - Initialize interrupt registers + * @vport: virtual port structure + */ +static int idpf_intr_reg_init(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int num_vecs = vport->num_q_vectors; + struct idpf_vec_regs *reg_vals; + int num_regs, i, err = 0; + u32 rx_itr, tx_itr; + u16 total_vecs; + + total_vecs = idpf_get_reserved_vecs(vport->adapter); + reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs), + GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + num_regs = idpf_get_reg_intr_vecs(vport, reg_vals); + if (num_regs < num_vecs) { + err = -EINVAL; + goto free_reg_vals; + } + + for (i = 0; i < num_vecs; i++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[i]; + u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC; + struct idpf_intr_reg *intr = &q_vector->intr_reg; + u32 spacing; + + intr->dyn_ctl = idpf_get_reg_addr(adapter, + reg_vals[vec_id].dyn_ctl_reg); + intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M; + intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S; + intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S; + + spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, + IDPF_PF_ITR_IDX_SPACING); + rx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_0, + reg_vals[vec_id].itrn_reg, + spacing); + tx_itr = 
PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_1, + reg_vals[vec_id].itrn_reg, + spacing); + intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr); + intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr); + } + +free_reg_vals: + kfree(reg_vals); + + return err; +} + +/** + * idpf_reset_reg_init - Initialize reset registers + * @adapter: Driver specific private structure + */ +static void idpf_reset_reg_init(struct idpf_adapter *adapter) +{ + adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT); + adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M; +} + +/** + * idpf_trigger_reset - trigger reset + * @adapter: Driver specific private structure + * @trig_cause: Reason to trigger a reset + */ +static void idpf_trigger_reset(struct idpf_adapter *adapter, + enum idpf_flags __always_unused trig_cause) +{ + u32 reset_reg; + + reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL)); + writel(reset_reg | PFGEN_CTRL_PFSWR, + idpf_get_reg_addr(adapter, PFGEN_CTRL)); +} + +/** + * idpf_reg_ops_init - Initialize register API function pointers + * @adapter: Driver specific private structure + */ +static void idpf_reg_ops_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_ctlq_reg_init; + adapter->dev_ops.reg_ops.intr_reg_init = idpf_intr_reg_init; + adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init; + adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init; + adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset; +} + +/** + * idpf_dev_ops_init - Initialize device API function pointers + * @adapter: Driver specific private structure + */ +void idpf_dev_ops_init(struct idpf_adapter *adapter) +{ + idpf_reg_ops_init(adapter); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_devids.h b/drivers/net/ethernet/intel/idpf/idpf_devids.h new file mode 100644 index 0000000000..5154a52ae6 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_devids.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_DEVIDS_H_ +#define _IDPF_DEVIDS_H_ + +#define IDPF_DEV_ID_PF 0x1452 +#define IDPF_DEV_ID_VF 0x145C + +#endif /* _IDPF_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c new file mode 100644 index 0000000000..52ea38669f --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns Success if the command is supported. + */ +static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vport->num_rxq; + idpf_vport_ctrl_unlock(netdev); + + return 0; + default: + break; + } + + idpf_vport_ctrl_unlock(netdev); + + return -EOPNOTSUPP; +} + +/** + * idpf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the key size on success, error value on failure. 
+ */ +static u32 idpf_get_rxfh_key_size(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + + if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + return -EOPNOTSUPP; + + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + + return user_config->rss_data.rss_key_size; +} + +/** + * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size on success, error value on failure. + */ +static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + + if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + return -EOPNOTSUPP; + + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + + return user_config->rss_data.rss_lut_size; +} + +/** + * idpf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table directly from the hardware. Always returns 0. + */ +static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_rss_data *rss_data; + struct idpf_adapter *adapter; + int err = 0; + u16 i; + + idpf_vport_ctrl_lock(netdev); + + adapter = np->adapter; + + if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (key) + memcpy(key, rss_data->rss_key, rss_data->rss_key_size); + + if (indir) { + for (i = 0; i < rss_data->rss_lut_size; i++) + indir[i] = rss_data->rss_lut[i]; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function to use + * + * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * returns 0 after programming the table. 
+ */ +static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_rss_data *rss_data; + struct idpf_adapter *adapter; + struct idpf_vport *vport; + int err = 0; + u16 lut; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + adapter = vport->adapter; + + if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + if (key) + memcpy(rss_data->rss_key, key, rss_data->rss_key_size); + + if (indir) { + for (lut = 0; lut < rss_data->rss_lut_size; lut++) + rss_data->rss_lut[lut] = indir[lut]; + } + + err = idpf_config_rss(vport); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * Report maximum of TX and RX. Report one extra channel to match our MailBox + * Queue. + */ +static void idpf_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + u16 num_txq, num_rxq; + u16 combined; + + vport_config = np->adapter->vport_config[np->vport_idx]; + + num_txq = vport_config->user_config.num_req_tx_qs; + num_rxq = vport_config->user_config.num_req_rx_qs; + + combined = min(num_txq, num_rxq); + + /* Report maximum channels */ + ch->max_combined = min_t(u16, vport_config->max_q.max_txq, + vport_config->max_q.max_rxq); + ch->max_rx = vport_config->max_q.max_rxq; + ch->max_tx = vport_config->max_q.max_txq; + + ch->max_other = IDPF_MAX_MBXQ; + ch->other_count = IDPF_MAX_MBXQ; + + ch->combined_count = combined; + ch->rx_count = num_rxq - combined; + ch->tx_count = num_txq - combined; +} + +/** + * idpf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with CP. Returns 0 on success, negative + * on failure. 
+ */ +static int idpf_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct idpf_vport_config *vport_config; + u16 combined, num_txq, num_rxq; + unsigned int num_req_tx_q; + unsigned int num_req_rx_q; + struct idpf_vport *vport; + struct device *dev; + int err = 0; + u16 idx; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idx = vport->idx; + vport_config = vport->adapter->vport_config[idx]; + + num_txq = vport_config->user_config.num_req_tx_qs; + num_rxq = vport_config->user_config.num_req_rx_qs; + + combined = min(num_txq, num_rxq); + + /* these checks are for cases where user didn't specify a particular + * value on cmd line but we get non-zero value anyway via + * get_channels(); look at ethtool.c in ethtool repository (the user + * space part), particularly, do_schannels() routine + */ + if (ch->combined_count == combined) + ch->combined_count = 0; + if (ch->combined_count && ch->rx_count == num_rxq - combined) + ch->rx_count = 0; + if (ch->combined_count && ch->tx_count == num_txq - combined) + ch->tx_count = 0; + + num_req_tx_q = ch->combined_count + ch->tx_count; + num_req_rx_q = ch->combined_count + ch->rx_count; + + dev = &vport->adapter->pdev->dev; + /* It's possible to specify number of queues that exceeds max. + * Stack checks max combined_count and max [tx|rx]_count but not the + * max combined_count + [tx|rx]_count. These checks should catch that. + */ + if (num_req_tx_q > vport_config->max_q.max_txq) { + dev_info(dev, "Maximum TX queues is %d\n", + vport_config->max_q.max_txq); + err = -EINVAL; + goto unlock_mutex; + } + if (num_req_rx_q > vport_config->max_q.max_rxq) { + dev_info(dev, "Maximum RX queues is %d\n", + vport_config->max_q.max_rxq); + err = -EINVAL; + goto unlock_mutex; + } + + if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) + goto unlock_mutex; + + vport_config->user_config.num_req_tx_qs = num_req_tx_q; + vport_config->user_config.num_req_rx_qs = num_req_rx_q; + + err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); + if (err) { + /* roll back queue change */ + vport_config->user_config.num_req_tx_qs = num_txq; + vport_config->user_config.num_req_rx_qs = num_rxq; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * @kring: unused + * @ext_ack: unused + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported. + */ +static void idpf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + ring->rx_max_pending = IDPF_MAX_RXQ_DESC; + ring->tx_max_pending = IDPF_MAX_TXQ_DESC; + ring->rx_pending = vport->rxq_desc_count; + ring->tx_pending = vport->txq_desc_count; + + idpf_vport_ctrl_unlock(netdev); +} + +/** + * idpf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * @kring: unused + * @ext_ack: unused + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. 
+ */ +static int idpf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct idpf_vport_user_config_data *config_data; + u32 new_rx_count, new_tx_count; + struct idpf_vport *vport; + int i, err = 0; + u16 idx; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idx = vport->idx; + + if (ring->tx_pending < IDPF_MIN_TXQ_DESC) { + netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n", + ring->tx_pending, + IDPF_MIN_TXQ_DESC); + err = -EINVAL; + goto unlock_mutex; + } + + if (ring->rx_pending < IDPF_MIN_RXQ_DESC) { + netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n", + ring->rx_pending, + IDPF_MIN_RXQ_DESC); + err = -EINVAL; + goto unlock_mutex; + } + + new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n", + new_rx_count); + + new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", + new_tx_count); + + if (new_tx_count == vport->txq_desc_count && + new_rx_count == vport->rxq_desc_count) + goto unlock_mutex; + + config_data = &vport->adapter->vport_config[idx]->user_config; + config_data->num_req_txq_desc = new_tx_count; + config_data->num_req_rxq_desc = new_rx_count; + + /* Since we adjusted the RX completion queue count, the RX buffer queue + * descriptor count needs to be adjusted as well + */ + for (i = 0; i < vport->num_bufqs_per_qgrp; i++) + vport->bufq_desc_count[i] = + IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, + vport->num_bufqs_per_qgrp); + + err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * struct idpf_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the IDPF_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the idpf_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the idpf_add_stat_string() helper function. + */ +struct idpf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an idpf_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. 
+ */ +#define IDPF_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics related to queues */ +#define IDPF_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_queue, _name, _stat) + +/* Stats associated with a Tx queue */ +static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { + IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), + IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), + IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), +}; + +/* Stats associated with an Rx queue */ +static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { + IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), + IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), + IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), +}; + +#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) +#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats) + +#define IDPF_PORT_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_vport, _name, _stat) + +static const struct idpf_stats idpf_gstrings_port_stats[] = { + IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err), + IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit), + IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo), + IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs), + IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops), + IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs), + IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize), + IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy), + IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast), + IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast), + IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast), + IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol), + IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast), + IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast), + IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast), +}; + +#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats) + +/** + * __idpf_add_qstat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * @type: stat type + * @idx: stat index + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + */ +static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats, + const unsigned int size, const char *type, + unsigned int idx) +{ + unsigned int i; + + for (i = 0; i < size; i++) + ethtool_sprintf(p, "%s_q-%u_%s", + type, idx, stats[i].stat_string); +} + +/** + * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @type: stat type + * @idx: stat idx + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. 
+ */ +#define idpf_add_qstat_strings(p, stats, type, idx) \ + __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx) + +/** + * idpf_add_stat_strings - Copy port stat strings into ethtool buffer + * @p: ethtool buffer + * @stats: struct to copy from + * @size: size of stats array to copy from + */ +static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + ethtool_sprintf(p, "%s", stats[i].stat_string); +} + +/** + * idpf_get_stat_strings - Get stat strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the statistics string table + */ +static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + unsigned int i; + + idpf_add_stat_strings(&data, idpf_gstrings_port_stats, + IDPF_PORT_STATS_LEN); + + vport_config = np->adapter->vport_config[np->vport_idx]; + /* It's critical that we always report a constant number of strings and + * that the strings are reported in the same order regardless of how + * many queues are actually in use. + */ + for (i = 0; i < vport_config->max_q.max_txq; i++) + idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, + "tx", i); + + for (i = 0; i < vport_config->max_q.max_rxq; i++) + idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, + "rx", i); + + page_pool_ethtool_stats_get_strings(data); +} + +/** + * idpf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds string tables for various string sets + */ +static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + idpf_get_stat_strings(netdev, data); + break; + default: + break; + } +} + +/** + * idpf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of various string tables. + */ +static int idpf_get_sset_count(struct net_device *netdev, int sset) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + u16 max_txq, max_rxq; + unsigned int size; + + if (sset != ETH_SS_STATS) + return -EINVAL; + + vport_config = np->adapter->vport_config[np->vport_idx]; + /* This size reported back here *must* be constant throughout the + * lifecycle of the netdevice, i.e. we must report the maximum length + * even for queues that don't technically exist. This is due to the + * fact that this userspace API uses three separate ioctl calls to get + * stats data but has no way to communicate back to userspace when that + * size has changed, which can typically happen as a result of changing + * number of queues. If the number/order of stats change in the middle + * of this call chain it will lead to userspace crashing/accessing bad + * data through buffer under/overflow. 
+ */ + max_txq = vport_config->max_q.max_txq; + max_rxq = vport_config->max_q.max_rxq; + + size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + + (IDPF_RX_QUEUE_STATS_LEN * max_rxq); + size += page_pool_ethtool_stats_get_count(); + + return size; +} + +/** + * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pstat: old stat pointer to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. If the pointer is null, data will be zero'd. + */ +static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, + const struct idpf_stats *stat) +{ + char *p; + + if (!pstat) { + /* Ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pstat + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * idpf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @q: the queue to copy + * + * Queue statistics must be copied while protected by u64_stats_fetch_begin, + * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats + * are defined in idpf_gstrings_queue_stats. If the queue pointer is null, + * zero out the queue stat values and update the data pointer. Otherwise + * safely copy the stats from the queue into the supplied buffer and update + * the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + */ +static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) +{ + const struct idpf_stats *stats; + unsigned int start; + unsigned int size; + unsigned int i; + + if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + size = IDPF_RX_QUEUE_STATS_LEN; + stats = idpf_gstrings_rx_queue_stats; + } else { + size = IDPF_TX_QUEUE_STATS_LEN; + stats = idpf_gstrings_tx_queue_stats; + } + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry. + */ + do { + start = u64_stats_fetch_begin(&q->stats_sync); + for (i = 0; i < size; i++) + idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); + } while (u64_stats_fetch_retry(&q->stats_sync, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * idpf_add_empty_queue_stats - Add stats for a non-existent queue + * @data: pointer to data buffer + * @qtype: type of data queue + * + * We must report a constant length of stats back to userspace regardless of + * how many queues are actually in use because stats collection happens over + * three separate ioctls and there's no way to notify userspace the size + * changed between those calls. This adds empty to data to the stats since we + * don't have a real queue to refer to for this stats slot. 
+ */ +static void idpf_add_empty_queue_stats(u64 **data, u16 qtype) +{ + unsigned int i; + int stats_len; + + if (qtype == VIRTCHNL2_QUEUE_TYPE_RX) + stats_len = IDPF_RX_QUEUE_STATS_LEN; + else + stats_len = IDPF_TX_QUEUE_STATS_LEN; + + for (i = 0; i < stats_len; i++) + (*data)[i] = 0; + *data += stats_len; +} + +/** + * idpf_add_port_stats - Copy port stats into ethtool buffer + * @vport: virtual port struct + * @data: ethtool buffer to copy into + */ +static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data) +{ + unsigned int size = IDPF_PORT_STATS_LEN; + unsigned int start; + unsigned int i; + + do { + start = u64_stats_fetch_begin(&vport->port_stats.stats_sync); + for (i = 0; i < size; i++) + idpf_add_one_ethtool_stat(&(*data)[i], vport, + &idpf_gstrings_port_stats[i]); + } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start)); + + *data += size; +} + +/** + * idpf_collect_queue_stats - accumulate various per queue stats + * into port level stats + * @vport: pointer to vport struct + **/ +static void idpf_collect_queue_stats(struct idpf_vport *vport) +{ + struct idpf_port_stats *pstats = &vport->port_stats; + int i, j; + + /* zero out port stats since they're actually tracked in per + * queue stats; this is only for reporting + */ + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_set(&pstats->rx_hw_csum_err, 0); + u64_stats_set(&pstats->rx_hsplit, 0); + u64_stats_set(&pstats->rx_hsplit_hbo, 0); + u64_stats_set(&pstats->rx_bad_descs, 0); + u64_stats_set(&pstats->tx_linearize, 0); + u64_stats_set(&pstats->tx_busy, 0); + u64_stats_set(&pstats->tx_drops, 0); + u64_stats_set(&pstats->tx_dma_map_errs, 0); + u64_stats_update_end(&pstats->stats_sync); + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rxq_grp->splitq.num_rxq_sets; + else + num_rxq = rxq_grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; + struct idpf_rx_queue_stats *stats; + struct idpf_queue *rxq; + unsigned int start; + + if (idpf_is_queue_model_split(vport->rxq_model)) + rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; + else + rxq = rxq_grp->singleq.rxqs[j]; + + if (!rxq) + continue; + + do { + start = u64_stats_fetch_begin(&rxq->stats_sync); + + stats = &rxq->q_stats.rx; + hw_csum_err = u64_stats_read(&stats->hw_csum_err); + hsplit = u64_stats_read(&stats->hsplit_pkts); + hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); + bad_descs = u64_stats_read(&stats->bad_descs); + } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); + + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err); + u64_stats_add(&pstats->rx_hsplit, hsplit); + u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo); + u64_stats_add(&pstats->rx_bad_descs, bad_descs); + u64_stats_update_end(&pstats->stats_sync); + } + } + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) { + u64 linearize, qbusy, skb_drops, dma_map_errs; + struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue_stats *stats; + unsigned int start; + + if (!txq) + continue; + + do { + start = u64_stats_fetch_begin(&txq->stats_sync); + + stats = &txq->q_stats.tx; + linearize = u64_stats_read(&stats->linearize); + qbusy = u64_stats_read(&stats->q_busy); + skb_drops = u64_stats_read(&stats->skb_drops); + dma_map_errs = 
u64_stats_read(&stats->dma_map_errs); + } while (u64_stats_fetch_retry(&txq->stats_sync, start)); + + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_add(&pstats->tx_linearize, linearize); + u64_stats_add(&pstats->tx_busy, qbusy); + u64_stats_add(&pstats->tx_drops, skb_drops); + u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); + u64_stats_update_end(&pstats->stats_sync); + } + } +} + +/** + * idpf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. + */ +static void idpf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + struct page_pool_stats pp_stats = { }; + struct idpf_vport *vport; + unsigned int total = 0; + unsigned int i, j; + bool is_splitq; + u16 qtype; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) { + idpf_vport_ctrl_unlock(netdev); + + return; + } + + rcu_read_lock(); + + idpf_collect_queue_stats(vport); + idpf_add_port_stats(vport, &data); + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + qtype = VIRTCHNL2_QUEUE_TYPE_TX; + + for (j = 0; j < txq_grp->num_txq; j++, total++) { + struct idpf_queue *txq = txq_grp->txqs[j]; + + if (!txq) + idpf_add_empty_queue_stats(&data, qtype); + else + idpf_add_queue_stats(&data, txq); + } + } + + vport_config = vport->adapter->vport_config[vport->idx]; + /* It is critical we provide a constant number of stats back to + * userspace regardless of how many queues are actually in use because + * there is no way to inform userspace the size has changed between + * ioctl calls. This will fill in any missing stats with zero. 
+ */ + for (; total < vport_config->max_q.max_txq; total++) + idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX); + total = 0; + + is_splitq = idpf_is_queue_model_split(vport->rxq_model); + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + u16 num_rxq; + + qtype = VIRTCHNL2_QUEUE_TYPE_RX; + + if (is_splitq) + num_rxq = rxq_grp->splitq.num_rxq_sets; + else + num_rxq = rxq_grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, total++) { + struct idpf_queue *rxq; + + if (is_splitq) + rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; + else + rxq = rxq_grp->singleq.rxqs[j]; + if (!rxq) + idpf_add_empty_queue_stats(&data, qtype); + else + idpf_add_queue_stats(&data, rxq); + + /* In splitq mode, don't get page pool stats here since + * the pools are attached to the buffer queues + */ + if (is_splitq) + continue; + + if (rxq) + page_pool_get_stats(rxq->pp, &pp_stats); + } + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_queue *rxbufq = + &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; + + page_pool_get_stats(rxbufq->pp, &pp_stats); + } + } + + for (; total < vport_config->max_q.max_rxq; total++) + idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); + + page_pool_ethtool_stats_get(data, &pp_stats); + + rcu_read_unlock(); + + idpf_vport_ctrl_unlock(netdev); +} + +/** + * idpf_find_rxq - find rxq from q index + * @vport: virtual port associated to queue + * @q_num: q index used to find queue + * + * returns pointer to rx queue + */ +static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) +{ + int q_grp, q_idx; + + if (!idpf_is_queue_model_split(vport->rxq_model)) + return vport->rxq_grps->singleq.rxqs[q_num]; + + q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + + return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; +} + +/** + * idpf_find_txq - find txq from q index + * @vport: virtual port associated to queue + * @q_num: q index used to find queue + * + * returns pointer to tx queue + */ +static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) +{ + int q_grp; + + if (!idpf_is_queue_model_split(vport->txq_model)) + return vport->txqs[q_num]; + + q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; + + return vport->txq_grps[q_grp].complq; +} + +/** + * __idpf_get_q_coalesce - get ITR values for specific queue + * @ec: ethtool structure to fill with driver's coalesce settings + * @q: quuee of Rx or Tx + */ +static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, + struct idpf_queue *q) +{ + if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + ec->use_adaptive_rx_coalesce = + IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); + ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; + } else { + ec->use_adaptive_tx_coalesce = + IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); + ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; + } +} + +/** + * idpf_get_q_coalesce - get ITR values for specific queue + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to program the device with + * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index + * + * Return 0 on success, and negative on failure + */ +static int idpf_get_q_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + u32 q_num) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = 
idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { + err = -EINVAL; + goto unlock_mutex; + } + + if (q_num < vport->num_rxq) + __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); + + if (q_num < vport->num_txq) + __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_coalesce - get ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to be filled + * @kec: unused + * @extack: unused + * + * Return 0 on success, and negative on failure + */ +static int idpf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *extack) +{ + /* Return coalesce based on queue number zero */ + return idpf_get_q_coalesce(netdev, ec, 0); +} + +/** + * idpf_get_per_q_coalesce - get ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @q_num: queue for which the itr values has to retrieved + * @ec: coalesce settings to be filled + * + * Return 0 on success, and negative on failure + */ + +static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + return idpf_get_q_coalesce(netdev, ec, q_num); +} + +/** + * __idpf_set_q_coalesce - set ITR values for specific queue + * @ec: ethtool structure from user to update ITR settings + * @q: queue for which itr values has to be set + * @is_rxq: is queue type rx + * + * Returns 0 on success, negative otherwise. + */ +static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, + struct idpf_queue *q, bool is_rxq) +{ + u32 use_adaptive_coalesce, coalesce_usecs; + struct idpf_q_vector *qv = q->q_vector; + bool is_dim_ena = false; + u16 itr_val; + + if (is_rxq) { + is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); + use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; + coalesce_usecs = ec->rx_coalesce_usecs; + itr_val = qv->rx_itr_value; + } else { + is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); + use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; + coalesce_usecs = ec->tx_coalesce_usecs; + itr_val = qv->tx_itr_value; + } + if (coalesce_usecs != itr_val && use_adaptive_coalesce) { + netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); + + return -EINVAL; + } + + if (is_dim_ena && use_adaptive_coalesce) + return 0; + + if (coalesce_usecs > IDPF_ITR_MAX) { + netdev_err(q->vport->netdev, + "Invalid value, %d-usecs range is 0-%d\n", + coalesce_usecs, IDPF_ITR_MAX); + + return -EINVAL; + } + + if (coalesce_usecs % 2) { + coalesce_usecs--; + netdev_info(q->vport->netdev, + "HW only supports even ITR values, ITR rounded to %d\n", + coalesce_usecs); + } + + if (is_rxq) { + qv->rx_itr_value = coalesce_usecs; + if (use_adaptive_coalesce) { + qv->rx_intr_mode = IDPF_ITR_DYNAMIC; + } else { + qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, qv->rx_itr_value, + false); + } + } else { + qv->tx_itr_value = coalesce_usecs; + if (use_adaptive_coalesce) { + qv->tx_intr_mode = IDPF_ITR_DYNAMIC; + } else { + qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); + } + } + + /* Update of static/dynamic itr will be taken care when interrupt is + * fired + */ + return 0; +} + +/** + * idpf_set_q_coalesce - set ITR values for specific queue + * @vport: vport associated to 
the queue that need updating + * @ec: coalesce settings to program the device with + * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index + * @is_rxq: is queue type rx + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_q_coalesce(struct idpf_vport *vport, + struct ethtool_coalesce *ec, + int q_num, bool is_rxq) +{ + struct idpf_queue *q; + + q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); + + if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) + return -EINVAL; + + return 0; +} + +/** + * idpf_set_coalesce - set ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to program the device with + * @kec: unused + * @extack: unused + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *extack) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int i, err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + for (i = 0; i < vport->num_txq; i++) { + err = idpf_set_q_coalesce(vport, ec, i, false); + if (err) + goto unlock_mutex; + } + + for (i = 0; i < vport->num_rxq; i++) { + err = idpf_set_q_coalesce(vport, ec, i, true); + if (err) + goto unlock_mutex; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_set_per_q_coalesce - set ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @q_num: queue for which the itr values has to be set + * @ec: coalesce settings to program the device with + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + err = idpf_set_q_coalesce(vport, ec, q_num, false); + if (err) { + idpf_vport_ctrl_unlock(netdev); + + return err; + } + + err = idpf_set_q_coalesce(vport, ec, q_num, true); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. + */ +static u32 idpf_get_msglevel(struct net_device *netdev) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + return adapter->msg_enable; +} + +/** + * idpf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + */ +static void idpf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + adapter->msg_enable = data; +} + +/** + * idpf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed/duplex settings. 
+ **/ +static int idpf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; + if (vport->link_up) { + cmd->base.duplex = DUPLEX_FULL; + cmd->base.speed = vport->link_speed_mbps; + } else { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + } + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + +static const struct ethtool_ops idpf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_msglevel = idpf_get_msglevel, + .set_msglevel = idpf_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = idpf_get_coalesce, + .set_coalesce = idpf_set_coalesce, + .get_per_queue_coalesce = idpf_get_per_q_coalesce, + .set_per_queue_coalesce = idpf_set_per_q_coalesce, + .get_ethtool_stats = idpf_get_ethtool_stats, + .get_strings = idpf_get_strings, + .get_sset_count = idpf_get_sset_count, + .get_channels = idpf_get_channels, + .get_rxnfc = idpf_get_rxnfc, + .get_rxfh_key_size = idpf_get_rxfh_key_size, + .get_rxfh_indir_size = idpf_get_rxfh_indir_size, + .get_rxfh = idpf_get_rxfh, + .set_rxfh = idpf_set_rxfh, + .set_channels = idpf_set_channels, + .get_ringparam = idpf_get_ringparam, + .set_ringparam = idpf_set_ringparam, + .get_link_ksettings = idpf_get_link_ksettings, +}; + +/** + * idpf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + */ +void idpf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &idpf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h new file mode 100644 index 0000000000..24edb8a6ec --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_PF_REGS_H_ +#define _IDPF_LAN_PF_REGS_H_ + +/* Receive queues */ +#define PF_QRX_BASE 0x00000000 +#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) * 0x1000))) +#define PF_QRX_BUFFQ_BASE 0x03000000 +#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000))) + +/* Transmit queues */ +#define PF_QTX_BASE 0x05000000 +#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) * 0x1000)) + +/* Control(PF Mailbox) Queue */ +#define PF_FW_BASE 0x08400000 + +#define PF_FW_ARQBAL (PF_FW_BASE) +#define PF_FW_ARQBAH (PF_FW_BASE + 0x4) +#define PF_FW_ARQLEN (PF_FW_BASE + 0x8) +#define PF_FW_ARQLEN_ARQLEN_S 0 +#define PF_FW_ARQLEN_ARQLEN_M GENMASK(12, 0) +#define PF_FW_ARQLEN_ARQVFE_S 28 +#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) +#define PF_FW_ARQLEN_ARQOVFL_S 29 +#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) +#define PF_FW_ARQLEN_ARQCRIT_S 30 +#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) +#define PF_FW_ARQLEN_ARQENABLE_S 31 +#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) +#define PF_FW_ARQH (PF_FW_BASE + 0xC) +#define PF_FW_ARQH_ARQH_S 0 +#define PF_FW_ARQH_ARQH_M GENMASK(12, 0) +#define PF_FW_ARQT (PF_FW_BASE + 0x10) + +#define PF_FW_ATQBAL (PF_FW_BASE + 0x14) +#define PF_FW_ATQBAH (PF_FW_BASE + 0x18) +#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C) +#define 
PF_FW_ATQLEN_ATQLEN_S 0 +#define PF_FW_ATQLEN_ATQLEN_M GENMASK(9, 0) +#define PF_FW_ATQLEN_ATQVFE_S 28 +#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) +#define PF_FW_ATQLEN_ATQOVFL_S 29 +#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) +#define PF_FW_ATQLEN_ATQCRIT_S 30 +#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) +#define PF_FW_ATQLEN_ATQENABLE_S 31 +#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) +#define PF_FW_ATQH (PF_FW_BASE + 0x20) +#define PF_FW_ATQH_ATQH_S 0 +#define PF_FW_ATQH_ATQH_M GENMASK(9, 0) +#define PF_FW_ATQT (PF_FW_BASE + 0x24) + +/* Interrupts */ +#define PF_GLINT_BASE 0x08900000 +#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000)) +#define PF_GLINT_DYN_CTL_INTENA_S 0 +#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S) +#define PF_GLINT_DYN_CTL_CLEARPBA_S 1 +#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S) +#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2 +#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S) +#define PF_GLINT_DYN_CTL_ITR_INDX_S 3 +#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3) +#define PF_GLINT_DYN_CTL_INTERVAL_S 5 +#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S) +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S) +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25 +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S) +#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30 +#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S) +#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31 +#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S) +/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is + * spacing b/w itrn registers of the same vector. 
+ */ +#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) +/* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */ +#define PF_GLINT_ITR(_ITR, _INT) \ + (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000)) +#define PF_GLINT_ITR_MAX_INDEX 2 +#define PF_GLINT_ITR_INTERVAL_S 0 +#define PF_GLINT_ITR_INTERVAL_M GENMASK(11, 0) + +/* Generic registers */ +#define PF_INT_DIR_OICR_ENA 0x08406000 +#define PF_INT_DIR_OICR_ENA_S 0 +#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0) +#define PF_INT_DIR_OICR 0x08406004 +#define PF_INT_DIR_OICR_TSYN_EVNT 0 +#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1) +#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2) +#define PF_INT_DIR_OICR_CAUSE 0x08406008 +#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0 +#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0) +#define PF_INT_PBA_CLEAR 0x0840600C + +#define PF_FUNC_RID 0x08406010 +#define PF_FUNC_RID_FUNCTION_NUMBER_S 0 +#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0) +#define PF_FUNC_RID_DEVICE_NUMBER_S 3 +#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3) +#define PF_FUNC_RID_BUS_NUMBER_S 8 +#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8) + +/* Reset registers */ +#define PFGEN_RTRIG 0x08407000 +#define PFGEN_RTRIG_CORER_S 0 +#define PFGEN_RTRIG_CORER_M BIT(0) +#define PFGEN_RTRIG_LINKR_S 1 +#define PFGEN_RTRIG_LINKR_M BIT(1) +#define PFGEN_RTRIG_IMCR_S 2 +#define PFGEN_RTRIG_IMCR_M BIT(2) +#define PFGEN_RSTAT 0x08407008 /* PFR Status */ +#define PFGEN_RSTAT_PFR_STATE_S 0 +#define PFGEN_RSTAT_PFR_STATE_M GENMASK(1, 0) +#define PFGEN_CTRL 0x0840700C +#define PFGEN_CTRL_PFSWR BIT(0) + +#endif diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h new file mode 100644 index 0000000000..a5752dcab8 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_TXRX_H_ +#define _IDPF_LAN_TXRX_H_ + +enum idpf_rss_hash { + IDPF_HASH_INVALID = 0, + /* Values 1 - 28 are reserved for future use */ + IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29, + IDPF_HASH_NONF_MULTICAST_IPV4_UDP, + IDPF_HASH_NONF_IPV4_UDP, + IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK, + IDPF_HASH_NONF_IPV4_TCP, + IDPF_HASH_NONF_IPV4_SCTP, + IDPF_HASH_NONF_IPV4_OTHER, + IDPF_HASH_FRAG_IPV4, + /* Values 37-38 are reserved */ + IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39, + IDPF_HASH_NONF_MULTICAST_IPV6_UDP, + IDPF_HASH_NONF_IPV6_UDP, + IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK, + IDPF_HASH_NONF_IPV6_TCP, + IDPF_HASH_NONF_IPV6_SCTP, + IDPF_HASH_NONF_IPV6_OTHER, + IDPF_HASH_FRAG_IPV6, + IDPF_HASH_NONF_RSVD47, + IDPF_HASH_NONF_FCOE_OX, + IDPF_HASH_NONF_FCOE_RX, + IDPF_HASH_NONF_FCOE_OTHER, + /* Values 51-62 are reserved */ + IDPF_HASH_L2_PAYLOAD = 63, + + IDPF_HASH_MAX +}; + +/* Supported RSS offloads */ +#define IDPF_DEFAULT_RSS_HASH \ + (BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV4) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV6) | \ + BIT_ULL(IDPF_HASH_L2_PAYLOAD)) + +#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) 
| \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP)) + +/* For idpf_splitq_base_tx_compl_desc */ +#define IDPF_TXD_COMPLQ_GEN_S 15 +#define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S) +#define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11 +#define IDPF_TXD_COMPLQ_COMPL_TYPE_M GENMASK_ULL(13, 11) +#define IDPF_TXD_COMPLQ_QID_S 0 +#define IDPF_TXD_COMPLQ_QID_M GENMASK_ULL(9, 0) + +/* For base mode TX descriptors */ + +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23 +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S) +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19 +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \ + (0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S) +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12 +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \ + (0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S) +#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11 +#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \ + BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S) +#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \ + IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M +#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9 +#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2 +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \ + (0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0 +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \ + (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S) + +#define IDPF_TXD_CTX_QW1_MSS_S 50 +#define IDPF_TXD_CTX_QW1_MSS_M GENMASK_ULL(63, 50) +#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30 +#define IDPF_TXD_CTX_QW1_TSO_LEN_M GENMASK_ULL(47, 30) +#define IDPF_TXD_CTX_QW1_CMD_S 4 +#define IDPF_TXD_CTX_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_CTX_QW1_DTYPE_S 0 +#define IDPF_TXD_CTX_QW1_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TXD_QW1_L2TAG1_S 48 +#define IDPF_TXD_QW1_L2TAG1_M GENMASK_ULL(63, 48) +#define IDPF_TXD_QW1_TX_BUF_SZ_S 34 +#define IDPF_TXD_QW1_TX_BUF_SZ_M GENMASK_ULL(47, 34) +#define IDPF_TXD_QW1_OFFSET_S 16 +#define IDPF_TXD_QW1_OFFSET_M GENMASK_ULL(33, 16) +#define IDPF_TXD_QW1_CMD_S 4 +#define IDPF_TXD_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_QW1_DTYPE_S 0 +#define IDPF_TXD_QW1_DTYPE_M GENMASK_ULL(3, 0) + +/* TX Completion Descriptor Completion Types */ +#define IDPF_TXD_COMPLT_ITR_FLUSH 0 +/* Descriptor completion type 1 is reserved */ +#define IDPF_TXD_COMPLT_RS 2 +/* Descriptor completion type 3 is reserved */ +#define IDPF_TXD_COMPLT_RE 4 +#define IDPF_TXD_COMPLT_SW_MARKER 5 + +enum idpf_tx_desc_dtype_value { + IDPF_TX_DESC_DTYPE_DATA = 0, + IDPF_TX_DESC_DTYPE_CTX = 1, + /* DTYPE 2 is reserved + * DTYPE 3 is free for future use + * DTYPE 4 is reserved + */ + IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5, + /* DTYPE 6 is reserved */ + IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7, + /* DTYPE 8, 9 are free for future use + * DTYPE 10 is reserved + * DTYPE 11 is free for future use + */ + IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12, + /* DTYPE 13, 14 are free for future use */ + + /* DESC_DONE - HW has completed write-back of descriptor */ + IDPF_TX_DESC_DTYPE_DESC_DONE = 15, +}; + +enum idpf_tx_ctx_desc_cmd_bits { + IDPF_TX_CTX_DESC_TSO = 0x01, + IDPF_TX_CTX_DESC_TSYN = 0x02, + IDPF_TX_CTX_DESC_IL2TAG2 = 0x04, + IDPF_TX_CTX_DESC_RSVD = 0x08, + IDPF_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + IDPF_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + 
IDPF_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + IDPF_TX_CTX_DESC_SWTCH_VSI = 0x30, + IDPF_TX_CTX_DESC_FILT_AU_EN = 0x40, + IDPF_TX_CTX_DESC_FILT_AU_EVICT = 0x80, + IDPF_TX_CTX_DESC_RSVD1 = 0xF00 +}; + +enum idpf_tx_desc_len_fields { + /* Note: These are predefined bit offsets */ + IDPF_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */ + IDPF_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */ + IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ +}; + +enum idpf_tx_base_desc_cmd_bits { + IDPF_TX_DESC_CMD_EOP = BIT(0), + IDPF_TX_DESC_CMD_RS = BIT(1), + /* only on VFs else RSVD */ + IDPF_TX_DESC_CMD_ICRC = BIT(2), + IDPF_TX_DESC_CMD_IL2TAG1 = BIT(3), + IDPF_TX_DESC_CMD_RSVD1 = BIT(4), + IDPF_TX_DESC_CMD_IIPT_IPV6 = BIT(5), + IDPF_TX_DESC_CMD_IIPT_IPV4 = BIT(6), + IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM = GENMASK(6, 5), + IDPF_TX_DESC_CMD_RSVD2 = BIT(7), + IDPF_TX_DESC_CMD_L4T_EOFT_TCP = BIT(8), + IDPF_TX_DESC_CMD_L4T_EOFT_SCTP = BIT(9), + IDPF_TX_DESC_CMD_L4T_EOFT_UDP = GENMASK(9, 8), + IDPF_TX_DESC_CMD_RSVD3 = BIT(10), + IDPF_TX_DESC_CMD_RSVD4 = BIT(11), +}; + +/* Transmit descriptors */ +/* splitq tx buf, singleq tx buf and singleq compl desc */ +struct idpf_base_tx_desc { + __le64 buf_addr; /* Address of descriptor's data buf */ + __le64 qw1; /* type_cmd_offset_bsz_l2tag1 */ +}; /* read used with buffer queues */ + +struct idpf_splitq_tx_compl_desc { + /* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */ + __le16 qid_comptype_gen; + union { + __le16 q_head; /* Queue head */ + __le16 compl_tag; /* Completion tag */ + } q_head_compl_tag; + u8 ts[3]; + u8 rsvd; /* Reserved */ +}; /* writeback used with completion queues */ + +/* Context descriptors */ +struct idpf_base_tx_ctx_desc { + struct { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd1; + } qw0; + __le64 qw1; /* type_cmd_tlen_mss/rt_hint */ +}; + +/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */ +enum idpf_tx_flex_desc_cmd_bits { + IDPF_TX_FLEX_DESC_CMD_EOP = BIT(0), + IDPF_TX_FLEX_DESC_CMD_RS = BIT(1), + IDPF_TX_FLEX_DESC_CMD_RE = BIT(2), + IDPF_TX_FLEX_DESC_CMD_IL2TAG1 = BIT(3), + IDPF_TX_FLEX_DESC_CMD_DUMMY = BIT(4), + IDPF_TX_FLEX_DESC_CMD_CS_EN = BIT(5), + IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN = BIT(6), + IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT = BIT(7), +}; + +struct idpf_flex_tx_desc { + __le64 buf_addr; /* Packet buffer address */ + struct { +#define IDPF_FLEX_TXD_QW1_DTYPE_S 0 +#define IDPF_FLEX_TXD_QW1_DTYPE_M GENMASK(4, 0) +#define IDPF_FLEX_TXD_QW1_CMD_S 5 +#define IDPF_FLEX_TXD_QW1_CMD_M GENMASK(15, 5) + __le16 cmd_dtype; + /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */ + struct { + __le16 l2tag1; + __le16 l2tag2; + } l2tags; + __le16 buf_size; + } qw1; +}; + +struct idpf_flex_tx_sched_desc { + __le64 buf_addr; /* Packet buffer address */ + + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */ + struct { + u8 cmd_dtype; +#define IDPF_TXD_FLEX_FLOW_DTYPE_M GENMASK(4, 0) +#define IDPF_TXD_FLEX_FLOW_CMD_EOP BIT(5) +#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN BIT(6) +#define IDPF_TXD_FLEX_FLOW_CMD_RE BIT(7) + + /* [23:23] Horizon Overflow bit, [22:0] timestamp */ + u8 ts[3]; +#define IDPF_TXD_FLOW_SCH_HORIZON_OVERFLOW_M BIT(7) + + __le16 compl_tag; + __le16 rxr_bufsize; +#define IDPF_TXD_FLEX_FLOW_RXR BIT(14) +#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M GENMASK(13, 0) + } qw1; +}; + +/* Common cmd fields for all flex context descriptors + * Note: these defines already account for the 5 bit dtype in the cmd_dtype + * field + */ +enum idpf_tx_flex_ctx_desc_cmd_bits { + IDPF_TX_FLEX_CTX_DESC_CMD_TSO = BIT(5), + IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN = 
BIT(6), + IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2 = BIT(7), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK = BIT(9), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL = BIT(10), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI = GENMASK(10, 9), +}; + +/* Standard flex descriptor TSO context quad word */ +struct idpf_flex_tx_tso_ctx_qw { + __le32 flex_tlen; +#define IDPF_TXD_FLEX_CTX_TLEN_M GENMASK(17, 0) +#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S 24 + __le16 mss_rt; +#define IDPF_TXD_FLEX_CTX_MSS_RT_M GENMASK(13, 0) + u8 hdr_len; + u8 flex; +}; + +struct idpf_flex_tx_ctx_desc { + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ + struct { + struct idpf_flex_tx_tso_ctx_qw qw0; + struct { + __le16 cmd_dtype; + u8 flex[6]; + } qw1; + } tso; +}; +#endif /* _IDPF_LAN_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h new file mode 100644 index 0000000000..3d73b6c768 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_VF_REGS_H_ +#define _IDPF_LAN_VF_REGS_H_ + +/* Reset */ +#define VFGEN_RSTAT 0x00008800 +#define VFGEN_RSTAT_VFR_STATE_S 0 +#define VFGEN_RSTAT_VFR_STATE_M GENMASK(1, 0) + +/* Control(VF Mailbox) Queue */ +#define VF_BASE 0x00006000 + +#define VF_ATQBAL (VF_BASE + 0x1C00) +#define VF_ATQBAH (VF_BASE + 0x1800) +#define VF_ATQLEN (VF_BASE + 0x0800) +#define VF_ATQLEN_ATQLEN_S 0 +#define VF_ATQLEN_ATQLEN_M GENMASK(9, 0) +#define VF_ATQLEN_ATQVFE_S 28 +#define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S) +#define VF_ATQLEN_ATQOVFL_S 29 +#define VF_ATQLEN_ATQOVFL_M BIT(VF_ATQLEN_ATQOVFL_S) +#define VF_ATQLEN_ATQCRIT_S 30 +#define VF_ATQLEN_ATQCRIT_M BIT(VF_ATQLEN_ATQCRIT_S) +#define VF_ATQLEN_ATQENABLE_S 31 +#define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S) +#define VF_ATQH (VF_BASE + 0x0400) +#define VF_ATQH_ATQH_S 0 +#define VF_ATQH_ATQH_M GENMASK(9, 0) +#define VF_ATQT (VF_BASE + 0x2400) + +#define VF_ARQBAL (VF_BASE + 0x0C00) +#define VF_ARQBAH (VF_BASE) +#define VF_ARQLEN (VF_BASE + 0x2000) +#define VF_ARQLEN_ARQLEN_S 0 +#define VF_ARQLEN_ARQLEN_M GENMASK(9, 0) +#define VF_ARQLEN_ARQVFE_S 28 +#define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S) +#define VF_ARQLEN_ARQOVFL_S 29 +#define VF_ARQLEN_ARQOVFL_M BIT(VF_ARQLEN_ARQOVFL_S) +#define VF_ARQLEN_ARQCRIT_S 30 +#define VF_ARQLEN_ARQCRIT_M BIT(VF_ARQLEN_ARQCRIT_S) +#define VF_ARQLEN_ARQENABLE_S 31 +#define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S) +#define VF_ARQH (VF_BASE + 0x1400) +#define VF_ARQH_ARQH_S 0 +#define VF_ARQH_ARQH_M GENMASK(12, 0) +#define VF_ARQT (VF_BASE + 0x1000) + +/* Transmit queues */ +#define VF_QTX_TAIL_BASE 0x00000000 +#define VF_QTX_TAIL(_QTX) (VF_QTX_TAIL_BASE + (_QTX) * 0x4) +#define VF_QTX_TAIL_EXT_BASE 0x00040000 +#define VF_QTX_TAIL_EXT(_QTX) (VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4)) + +/* Receive queues */ +#define VF_QRX_TAIL_BASE 0x00002000 +#define VF_QRX_TAIL(_QRX) (VF_QRX_TAIL_BASE + ((_QRX) * 4)) +#define VF_QRX_TAIL_EXT_BASE 0x00050000 +#define VF_QRX_TAIL_EXT(_QRX) (VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4)) +#define VF_QRXB_TAIL_BASE 0x00060000 +#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4)) + +/* Interrupts */ +#define VF_INT_DYN_CTL0 0x00005C00 +#define VF_INT_DYN_CTL0_INTENA_S 0 +#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S) +#define VF_INT_DYN_CTL0_ITR_INDX_S 3 +#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3) +#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 
4)) +#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4)) +#define VF_INT_DYN_CTLN_INTENA_S 0 +#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S) +#define VF_INT_DYN_CTLN_CLEARPBA_S 1 +#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S) +#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2 +#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S) +#define VF_INT_DYN_CTLN_ITR_INDX_S 3 +#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3) +#define VF_INT_DYN_CTLN_INTERVAL_S 5 +#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S) +#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24 +#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S) +#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25 +#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S) +#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30 +#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S) +#define VF_INT_DYN_CTLN_INTENA_MSK_S 31 +#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S) +/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is spacing + * b/w itrn registers of the same vector + */ +#define VF_INT_ITR0(_ITR) (0x00004C00 + ((_ITR) * 4)) +#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) +/* For VF with 16 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x40 and base register offset is 0x00002800 + */ +#define VF_INT_ITRN(_INT, _ITR) \ + (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40)) +/* For VF with 64 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x100 and base register offset is 0x00002C00 + */ +#define VF_INT_ITRN_64(_INT, _ITR) \ + (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100)) +/* For VF with 2k vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x2000 and base register offset is 0x00072000 + */ +#define VF_INT_ITRN_2K(_INT, _ITR) \ + (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000)) +#define VF_INT_ITRN_MAX_INDEX 2 +#define VF_INT_ITRN_INTERVAL_S 0 +#define VF_INT_ITRN_INTERVAL_M GENMASK(11, 0) +#define VF_INT_PBA_CLEAR 0x00008900 + +#define VF_INT_ICR0_ENA1 0x00005000 +#define VF_INT_ICR0_ENA1_ADMINQ_S 30 +#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S) +#define VF_INT_ICR0_ENA1_RSVD_S 31 +#define VF_INT_ICR01 0x00004800 +#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4)) +#define VF_QF_HENA_MAX_INDX 1 +#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) +#define VF_QF_HKEY_MAX_INDX 12 +#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4)) +#define VF_QF_HLUT_MAX_INDX 15 +#endif diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c new file mode 100644 index 0000000000..0241e498cc --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -0,0 +1,2381 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +static const struct net_device_ops idpf_netdev_ops_splitq; +static const struct net_device_ops idpf_netdev_ops_singleq; + +const char * const idpf_vport_vc_state_str[] = { + IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING) +}; + +/** + * idpf_init_vector_stack - Fill the MSIX vector stack with vector index + * @adapter: private data struct + * + * Return 0 on success, error on failure + */ +static int idpf_init_vector_stack(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack; + u16 min_vec; + u32 i; + + mutex_lock(&adapter->vector_lock); + min_vec = 
adapter->num_msix_entries - adapter->num_avail_msix; + stack = &adapter->vector_stack; + stack->size = adapter->num_msix_entries; + /* set the base and top to point at start of the 'free pool' to + * distribute the unused vectors on-demand basis + */ + stack->base = min_vec; + stack->top = min_vec; + + stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL); + if (!stack->vec_idx) { + mutex_unlock(&adapter->vector_lock); + + return -ENOMEM; + } + + for (i = 0; i < stack->size; i++) + stack->vec_idx[i] = i; + + mutex_unlock(&adapter->vector_lock); + + return 0; +} + +/** + * idpf_deinit_vector_stack - zero out the MSIX vector stack + * @adapter: private data struct + */ +static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack; + + mutex_lock(&adapter->vector_lock); + stack = &adapter->vector_stack; + kfree(stack->vec_idx); + stack->vec_idx = NULL; + mutex_unlock(&adapter->vector_lock); +} + +/** + * idpf_mb_intr_rel_irq - Free the IRQ association with the OS + * @adapter: adapter structure + * + * This will also disable interrupt mode and queue up mailbox task. Mailbox + * task will reschedule itself if not in interrupt mode. + */ +static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) +{ + clear_bit(IDPF_MB_INTR_MODE, adapter->flags); + free_irq(adapter->msix_entries[0].vector, adapter); + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); +} + +/** + * idpf_intr_rel - Release interrupt capabilities and free memory + * @adapter: adapter to disable interrupts on + */ +void idpf_intr_rel(struct idpf_adapter *adapter) +{ + int err; + + if (!adapter->msix_entries) + return; + + idpf_mb_intr_rel_irq(adapter); + pci_free_irq_vectors(adapter->pdev); + + err = idpf_send_dealloc_vectors_msg(adapter); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to deallocate vectors: %d\n", err); + + idpf_deinit_vector_stack(adapter); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * idpf_mb_intr_clean - Interrupt handler for the mailbox + * @irq: interrupt number + * @data: pointer to the adapter structure + */ +static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data) +{ + struct idpf_adapter *adapter = (struct idpf_adapter *)data; + + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + + return IRQ_HANDLED; +} + +/** + * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox + * @adapter: adapter to get the hardware address for register write + */ +static void idpf_mb_irq_enable(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 val; + + val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m; + writel(val, intr->dyn_ctl); + writel(intr->icr_ena_ctlq_m, intr->icr_ena); +} + +/** + * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt + * @adapter: adapter structure to pass to the mailbox irq handler + */ +static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) +{ + struct idpf_q_vector *mb_vector = &adapter->mb_vector; + int irq_num, mb_vidx = 0, err; + + irq_num = adapter->msix_entries[mb_vidx].vector; + mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", + dev_driver_string(&adapter->pdev->dev), + "Mailbox", mb_vidx); + err = request_irq(irq_num, adapter->irq_mb_handler, 0, + mb_vector->name, adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "IRQ request for mailbox failed, error: %d\n", err); + + return err; + } + + set_bit(IDPF_MB_INTR_MODE, adapter->flags); + + return 0; +} + +/** + * idpf_set_mb_vec_id - 
Set vector index for mailbox + * @adapter: adapter structure to access the vector chunks + * + * The first vector id in the requested vector chunks from the CP is for + * the mailbox + */ +static void idpf_set_mb_vec_id(struct idpf_adapter *adapter) +{ + if (adapter->req_vec_chunks) + adapter->mb_vector.v_idx = + le16_to_cpu(adapter->caps.mailbox_vector_id); + else + adapter->mb_vector.v_idx = 0; +} + +/** + * idpf_mb_intr_init - Initialize the mailbox interrupt + * @adapter: adapter structure to store the mailbox vector + */ +static int idpf_mb_intr_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter); + adapter->irq_mb_handler = idpf_mb_intr_clean; + + return idpf_mb_intr_req_irq(adapter); +} + +/** + * idpf_vector_lifo_push - push MSIX vector index onto stack + * @adapter: private data struct + * @vec_idx: vector index to store + */ +static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx) +{ + struct idpf_vector_lifo *stack = &adapter->vector_stack; + + lockdep_assert_held(&adapter->vector_lock); + + if (stack->top == stack->base) { + dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n", + stack->top); + return -EINVAL; + } + + stack->vec_idx[--stack->top] = vec_idx; + + return 0; +} + +/** + * idpf_vector_lifo_pop - pop MSIX vector index from stack + * @adapter: private data struct + */ +static int idpf_vector_lifo_pop(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack = &adapter->vector_stack; + + lockdep_assert_held(&adapter->vector_lock); + + if (stack->top == stack->size) { + dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n"); + + return -EINVAL; + } + + return stack->vec_idx[stack->top++]; +} + +/** + * idpf_vector_stash - Store the vector indexes onto the stack + * @adapter: private data struct + * @q_vector_idxs: vector index array + * @vec_info: info related to the number of vectors + * + * This function is a no-op if there are no vectors indexes to be stashed + */ +static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs, + struct idpf_vector_info *vec_info) +{ + int i, base = 0; + u16 vec_idx; + + lockdep_assert_held(&adapter->vector_lock); + + if (!vec_info->num_curr_vecs) + return; + + /* For default vports, no need to stash vector allocated from the + * default pool onto the stack + */ + if (vec_info->default_vport) + base = IDPF_MIN_Q_VEC; + + for (i = vec_info->num_curr_vecs - 1; i >= base ; i--) { + vec_idx = q_vector_idxs[i]; + idpf_vector_lifo_push(adapter, vec_idx); + adapter->num_avail_msix++; + } +} + +/** + * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes + * @adapter: driver specific private structure + * @q_vector_idxs: vector index array + * @vec_info: info related to the number of vectors + * + * This is the core function to distribute the MSIX vectors acquired from the + * OS. It expects the caller to pass the number of vectors required and + * also previously allocated. First, it stashes previously allocated vector + * indexes on to the stack and then figures out if it can allocate requested + * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as + * requested vectors, then this function just stashes the already allocated + * vectors and returns 0. 
+ * + * Returns actual number of vectors allocated on success, error value on failure + * If 0 is returned, implies the stack has no vectors to allocate which is also + * a failure case for the caller + */ +int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, + u16 *q_vector_idxs, + struct idpf_vector_info *vec_info) +{ + u16 num_req_vecs, num_alloc_vecs = 0, max_vecs; + struct idpf_vector_lifo *stack; + int i, j, vecid; + + mutex_lock(&adapter->vector_lock); + stack = &adapter->vector_stack; + num_req_vecs = vec_info->num_req_vecs; + + /* Stash interrupt vector indexes onto the stack if required */ + idpf_vector_stash(adapter, q_vector_idxs, vec_info); + + if (!num_req_vecs) + goto rel_lock; + + if (vec_info->default_vport) { + /* As IDPF_MIN_Q_VEC per default vport is put aside in the + * default pool of the stack, use them for default vports + */ + j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC; + for (i = 0; i < IDPF_MIN_Q_VEC; i++) { + q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++]; + num_req_vecs--; + } + } + + /* Find if stack has enough vector to allocate */ + max_vecs = min(adapter->num_avail_msix, num_req_vecs); + + for (j = 0; j < max_vecs; j++) { + vecid = idpf_vector_lifo_pop(adapter); + q_vector_idxs[num_alloc_vecs++] = vecid; + } + adapter->num_avail_msix -= max_vecs; + +rel_lock: + mutex_unlock(&adapter->vector_lock); + + return num_alloc_vecs; +} + +/** + * idpf_intr_req - Request interrupt capabilities + * @adapter: adapter to enable interrupts on + * + * Returns 0 on success, negative on failure + */ +int idpf_intr_req(struct idpf_adapter *adapter) +{ + u16 default_vports = idpf_get_default_vports(adapter); + int num_q_vecs, total_vecs, num_vec_ids; + int min_vectors, v_actual, err; + unsigned int vector; + u16 *vecids; + + total_vecs = idpf_get_reserved_vecs(adapter); + num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; + + err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate %d vectors: %d\n", num_q_vecs, err); + + return -EAGAIN; + } + + min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; + v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, + total_vecs, PCI_IRQ_MSIX); + if (v_actual < min_vectors) { + dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", + v_actual); + err = -EAGAIN; + goto send_dealloc_vecs; + } + + adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) { + err = -ENOMEM; + goto free_irq; + } + + idpf_set_mb_vec_id(adapter); + + vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + if (!vecids) { + err = -ENOMEM; + goto free_msix; + } + + if (adapter->req_vec_chunks) { + struct virtchnl2_vector_chunks *vchunks; + struct virtchnl2_alloc_vectors *ac; + + ac = adapter->req_vec_chunks; + vchunks = &ac->vchunks; + + num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, + vchunks); + if (num_vec_ids < v_actual) { + err = -EINVAL; + goto free_vecids; + } + } else { + int i; + + for (i = 0; i < v_actual; i++) + vecids[i] = i; + } + + for (vector = 0; vector < v_actual; vector++) { + adapter->msix_entries[vector].entry = vecids[vector]; + adapter->msix_entries[vector].vector = + pci_irq_vector(adapter->pdev, vector); + } + + adapter->num_req_msix = total_vecs; + adapter->num_msix_entries = v_actual; + /* 'num_avail_msix' is used to distribute excess vectors to the vports + * after considering the minimum vectors required per each default + * vport + */ + 
adapter->num_avail_msix = v_actual - min_vectors; + + /* Fill MSIX vector lifo stack with vector indexes */ + err = idpf_init_vector_stack(adapter); + if (err) + goto free_vecids; + + err = idpf_mb_intr_init(adapter); + if (err) + goto deinit_vec_stack; + idpf_mb_irq_enable(adapter); + kfree(vecids); + + return 0; + +deinit_vec_stack: + idpf_deinit_vector_stack(adapter); +free_vecids: + kfree(vecids); +free_msix: + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +free_irq: + pci_free_irq_vectors(adapter->pdev); +send_dealloc_vecs: + idpf_send_dealloc_vectors_msg(adapter); + + return err; +} + +/** + * idpf_find_mac_filter - Search filter list for specific mac filter + * @vconfig: Vport config structure + * @macaddr: The MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_filter_list_lock. + **/ +static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * __idpf_del_mac_filter - Delete a MAC filter from the filter list + * @vport_config: Vport config structure + * @macaddr: The MAC address + * + * Returns 0 on success, error value on failure + **/ +static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + list_del(&f->list); + kfree(f); + } + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; +} + +/** + * idpf_del_mac_filter - Delete a MAC filter from the filter list + * @vport: Main vport structure + * @np: Netdev private structure + * @macaddr: The MAC address + * @async: Don't wait for return message + * + * Removes filter from list and if interface is up, tells hardware about the + * removed filter. + **/ +static int idpf_del_mac_filter(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + const u8 *macaddr, bool async) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = np->adapter->vport_config[np->vport_idx]; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + f->remove = true; + } else { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return -EINVAL; + } + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + if (np->state == __IDPF_VPORT_UP) { + int err; + + err = idpf_add_del_mac_filters(vport, np, false, async); + if (err) + return err; + } + + return __idpf_del_mac_filter(vport_config, macaddr); +} + +/** + * __idpf_add_mac_filter - Add mac filter helper function + * @vport_config: Vport config structure + * @macaddr: Address to add + * + * Takes mac_filter_list_lock spinlock to add new filter to list. 
+ */ +static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + f->remove = false; + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; + } + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return -ENOMEM; + } + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, &vport_config->user_config.mac_filter_list); + f->add = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; +} + +/** + * idpf_add_mac_filter - Add a mac filter to the filter list + * @vport: Main vport structure + * @np: Netdev private structure + * @macaddr: The MAC address + * @async: Don't wait for return message + * + * Returns 0 on success or error on failure. If interface is up, we'll also + * send the virtchnl message to tell hardware about the filter. + **/ +static int idpf_add_mac_filter(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + const u8 *macaddr, bool async) +{ + struct idpf_vport_config *vport_config; + int err; + + vport_config = np->adapter->vport_config[np->vport_idx]; + err = __idpf_add_mac_filter(vport_config, macaddr); + if (err) + return err; + + if (np->state == __IDPF_VPORT_UP) + err = idpf_add_del_mac_filters(vport, np, true, async); + + return err; +} + +/** + * idpf_del_all_mac_filters - Delete all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Deletes all filters + */ +static void idpf_del_all_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f, *ftmp; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list, + list) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); +} + +/** + * idpf_restore_mac_filters - Re-add all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to + * resync filters back to HW. + */ +static void idpf_restore_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) + f->add = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), + true, false); +} + +/** + * idpf_remove_mac_filters - Remove all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters + * to remove filters in HW. 
+ */ +static void idpf_remove_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) + f->remove = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), + false, false); +} + +/** + * idpf_deinit_mac_addr - deinitialize mac address for vport + * @vport: main vport structure + */ +static void idpf_deinit_mac_addr(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + + f = idpf_find_mac_filter(vport_config, vport->default_mac_addr); + if (f) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); +} + +/** + * idpf_init_mac_addr - initialize mac address for vport + * @vport: main vport structure + * @netdev: pointer to netdev struct associated with this vport + */ +static int idpf_init_mac_addr(struct idpf_vport *vport, + struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_adapter *adapter = vport->adapter; + int err; + + if (is_valid_ether_addr(vport->default_mac_addr)) { + eth_hw_addr_set(netdev, vport->default_mac_addr); + ether_addr_copy(netdev->perm_addr, vport->default_mac_addr); + + return idpf_add_mac_filter(vport, np, vport->default_mac_addr, + false); + } + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_MACFILTER)) { + dev_err(&adapter->pdev->dev, + "MAC address is not provided and capability is not set\n"); + + return -EINVAL; + } + + eth_hw_addr_random(netdev); + err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false); + if (err) + return err; + + dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n", + vport->default_mac_addr, netdev->dev_addr); + ether_addr_copy(vport->default_mac_addr, netdev->dev_addr); + + return 0; +} + +/** + * idpf_cfg_netdev - Allocate, configure and register a netdev + * @vport: main vport structure + * + * Returns 0 on success, negative value on failure. 
+ */ +static int idpf_cfg_netdev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + netdev_features_t dflt_features; + netdev_features_t offloads = 0; + struct idpf_netdev_priv *np; + struct net_device *netdev; + u16 idx = vport->idx; + int err; + + vport_config = adapter->vport_config[idx]; + + /* It's possible we already have a netdev allocated and registered for + * this vport + */ + if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) { + netdev = adapter->netdevs[idx]; + np = netdev_priv(netdev); + np->vport = vport; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; + vport->netdev = netdev; + + return idpf_init_mac_addr(vport, netdev); + } + + netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv), + vport_config->max_q.max_txq, + vport_config->max_q.max_rxq); + if (!netdev) + return -ENOMEM; + + vport->netdev = netdev; + np = netdev_priv(netdev); + np->vport = vport; + np->adapter = adapter; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; + + spin_lock_init(&np->stats_lock); + + err = idpf_init_mac_addr(vport, netdev); + if (err) { + free_netdev(vport->netdev); + vport->netdev = NULL; + + return err; + } + + /* assign netdev_ops */ + if (idpf_is_queue_model_split(vport->txq_model)) + netdev->netdev_ops = &idpf_netdev_ops_splitq; + else + netdev->netdev_ops = &idpf_netdev_ops_singleq; + + /* setup watchdog timeout value to be 5 second */ + netdev->watchdog_timeo = 5 * HZ; + + netdev->dev_port = idx; + + /* configure default MTU size */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = vport->max_mtu; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA; + + if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + dflt_features |= NETIF_F_RXHASH; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) + dflt_features |= NETIF_F_IP_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) + dflt_features |= NETIF_F_IPV6_CSUM; + if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) + dflt_features |= NETIF_F_RXCSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) + dflt_features |= NETIF_F_SCTP_CRC; + + if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) + dflt_features |= NETIF_F_TSO; + if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) + dflt_features |= NETIF_F_TSO6; + if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, + VIRTCHNL2_CAP_SEG_IPV4_UDP | + VIRTCHNL2_CAP_SEG_IPV6_UDP)) + dflt_features |= NETIF_F_GSO_UDP_L4; + if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) + offloads |= NETIF_F_GRO_HW; + /* advertise to stack only if offloads for encapsulated packets is + * supported + */ + if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { + offloads |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + 0; + + if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, + IDPF_CAP_TUNNEL_TX_CSUM)) + netdev->gso_partial_features |= + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + offloads |= NETIF_F_TSO_MANGLEID; + } + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) + offloads |= NETIF_F_LOOPBACK; + + netdev->features |= dflt_features; + netdev->hw_features |= dflt_features | offloads; + netdev->hw_enc_features |= 
dflt_features | offloads; + idpf_set_ethtool_ops(netdev); + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + /* carrier off on init to avoid Tx hangs */ + netif_carrier_off(netdev); + + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(netdev); + + /* The vport can be arbitrarily released so we need to also track + * netdevs in the adapter struct + */ + adapter->netdevs[idx] = netdev; + + return 0; +} + +/** + * idpf_get_free_slot - get the next non-NULL location index in array + * @adapter: adapter in which to look for a free vport slot + */ +static int idpf_get_free_slot(struct idpf_adapter *adapter) +{ + unsigned int i; + + for (i = 0; i < adapter->max_vports; i++) { + if (!adapter->vports[i]) + return i; + } + + return IDPF_NO_FREE_SLOT; +} + +/** + * idpf_remove_features - Turn off feature configs + * @vport: virtual port structure + */ +static void idpf_remove_features(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) + idpf_remove_mac_filters(vport); +} + +/** + * idpf_vport_stop - Disable a vport + * @vport: vport to disable + */ +static void idpf_vport_stop(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + + if (np->state <= __IDPF_VPORT_DOWN) + return; + + netif_carrier_off(vport->netdev); + netif_tx_disable(vport->netdev); + + idpf_send_disable_vport_msg(vport); + idpf_send_disable_queues_msg(vport); + idpf_send_map_unmap_queue_vector_msg(vport, false); + /* Normally we ask for queues in create_vport, but if the number of + * initially requested queues have changed, for example via ethtool + * set channels, we do delete queues and then add the queues back + * instead of deleting and reallocating the vport. + */ + if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags)) + idpf_send_delete_queues_msg(vport); + + idpf_remove_features(vport); + + vport->link_up = false; + idpf_vport_intr_deinit(vport); + idpf_vport_intr_rel(vport); + idpf_vport_queues_rel(vport); + np->state = __IDPF_VPORT_DOWN; +} + +/** + * idpf_stop - Disables a network interface + * @netdev: network interface device structure + * + * The stop entry point is called when an interface is de-activated by the OS, + * and the netdevice enters the DOWN state. The hardware is still under the + * driver's control, but the netdev interface is disabled. 
+ * + * Returns success only - not allowed to fail + */ +static int idpf_stop(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + + if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags)) + return 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idpf_vport_stop(vport); + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + +/** + * idpf_decfg_netdev - Unregister the netdev + * @vport: vport for which netdev to be unregistered + */ +static void idpf_decfg_netdev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + unregister_netdev(vport->netdev); + free_netdev(vport->netdev); + vport->netdev = NULL; + + adapter->netdevs[vport->idx] = NULL; +} + +/** + * idpf_vport_rel - Delete a vport and free its resources + * @vport: the vport being removed + */ +static void idpf_vport_rel(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + struct idpf_vector_info vec_info; + struct idpf_rss_data *rss_data; + struct idpf_vport_max_q max_q; + u16 idx = vport->idx; + int i; + + vport_config = adapter->vport_config[vport->idx]; + idpf_deinit_rss(vport); + rss_data = &vport_config->user_config.rss_data; + kfree(rss_data->rss_key); + rss_data->rss_key = NULL; + + idpf_send_destroy_vport_msg(vport); + + /* Set all bits as we dont know on which vc_state the vport vhnl_wq + * is waiting on and wakeup the virtchnl workqueue even if it is + * waiting for the response as we are going down + */ + for (i = 0; i < IDPF_VC_NBITS; i++) + set_bit(i, vport->vc_state); + wake_up(&vport->vchnl_wq); + + mutex_destroy(&vport->vc_buf_lock); + + /* Clear all the bits */ + for (i = 0; i < IDPF_VC_NBITS; i++) + clear_bit(i, vport->vc_state); + + /* Release all max queues allocated to the adapter's pool */ + max_q.max_rxq = vport_config->max_q.max_rxq; + max_q.max_txq = vport_config->max_q.max_txq; + max_q.max_bufq = vport_config->max_q.max_bufq; + max_q.max_complq = vport_config->max_q.max_complq; + idpf_vport_dealloc_max_qs(adapter, &max_q); + + /* Release all the allocated vectors on the stack */ + vec_info.num_req_vecs = 0; + vec_info.num_curr_vecs = vport->num_q_vectors; + vec_info.default_vport = vport->default_vport; + + idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info); + + kfree(vport->q_vector_idxs); + vport->q_vector_idxs = NULL; + + kfree(adapter->vport_params_recvd[idx]); + adapter->vport_params_recvd[idx] = NULL; + kfree(adapter->vport_params_reqd[idx]); + adapter->vport_params_reqd[idx] = NULL; + if (adapter->vport_config[idx]) { + kfree(adapter->vport_config[idx]->req_qs_chunks); + adapter->vport_config[idx]->req_qs_chunks = NULL; + } + kfree(vport); + adapter->num_alloc_vports--; +} + +/** + * idpf_vport_dealloc - cleanup and release a given vport + * @vport: pointer to idpf vport structure + * + * returns nothing + */ +static void idpf_vport_dealloc(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + unsigned int i = vport->idx; + + idpf_deinit_mac_addr(vport); + idpf_vport_stop(vport); + + if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + idpf_decfg_netdev(vport); + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + idpf_del_all_mac_filters(vport); + + if (adapter->netdevs[i]) { + struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]); + + np->vport = NULL; + } + + idpf_vport_rel(vport); + + adapter->vports[i] = NULL; + adapter->next_vport = 
idpf_get_free_slot(adapter); +} + +/** + * idpf_vport_alloc - Allocates the next available struct vport in the adapter + * @adapter: board private structure + * @max_q: vport max queue info + * + * returns a pointer to a vport on success, NULL on failure. + */ +static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_rss_data *rss_data; + u16 idx = adapter->next_vport; + struct idpf_vport *vport; + u16 num_max_q; + + if (idx == IDPF_NO_FREE_SLOT) + return NULL; + + vport = kzalloc(sizeof(*vport), GFP_KERNEL); + if (!vport) + return vport; + + if (!adapter->vport_config[idx]) { + struct idpf_vport_config *vport_config; + + vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); + if (!vport_config) { + kfree(vport); + + return NULL; + } + + adapter->vport_config[idx] = vport_config; + } + + vport->idx = idx; + vport->adapter = adapter; + vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET; + vport->default_vport = adapter->num_alloc_vports < + idpf_get_default_vports(adapter); + + num_max_q = max(max_q->max_txq, max_q->max_rxq); + vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); + if (!vport->q_vector_idxs) { + kfree(vport); + + return NULL; + } + idpf_vport_init(vport, max_q); + + /* This alloc is done separate from the LUT because it's not strictly + * dependent on how many queues we have. If we change number of queues + * and soft reset we'll need a new LUT but the key can remain the same + * for as long as the vport exists. + */ + rss_data = &adapter->vport_config[idx]->user_config.rss_data; + rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); + if (!rss_data->rss_key) { + kfree(vport); + + return NULL; + } + /* Initialize default rss key */ + netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); + + /* fill vport slot in the adapter struct */ + adapter->vports[idx] = vport; + adapter->vport_ids[idx] = idpf_get_vport_id(vport); + + adapter->num_alloc_vports++; + /* prepare adapter->next_vport for next use */ + adapter->next_vport = idpf_get_free_slot(adapter); + + return vport; +} + +/** + * idpf_get_stats64 - get statistics for network device structure + * @netdev: network interface device structure + * @stats: main device statistics structure + */ +static void idpf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + spin_lock_bh(&np->stats_lock); + *stats = np->netstats; + spin_unlock_bh(&np->stats_lock); +} + +/** + * idpf_statistics_task - Delayed task to get statistics over mailbox + * @work: work_struct handle to our data + */ +void idpf_statistics_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + int i; + + adapter = container_of(work, struct idpf_adapter, stats_task.work); + + for (i = 0; i < adapter->max_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + idpf_send_get_stats_msg(vport); + } + + queue_delayed_work(adapter->stats_wq, &adapter->stats_task, + msecs_to_jiffies(10000)); +} + +/** + * idpf_mbx_task - Delayed task to handle mailbox responses + * @work: work_struct handle + */ +void idpf_mbx_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, mbx_task.work); + + if (test_bit(IDPF_MB_INTR_MODE, adapter->flags)) + idpf_mb_irq_enable(adapter); + else + queue_delayed_work(adapter->mbx_wq, 
&adapter->mbx_task, + msecs_to_jiffies(300)); + + idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0); +} + +/** + * idpf_service_task - Delayed task for handling mailbox responses + * @work: work_struct handle to our data + * + */ +void idpf_service_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, serv_task.work); + + if (idpf_is_reset_detected(adapter) && + !idpf_is_reset_in_prog(adapter) && + !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { + dev_info(&adapter->pdev->dev, "HW reset detected\n"); + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } + + queue_delayed_work(adapter->serv_wq, &adapter->serv_task, + msecs_to_jiffies(300)); +} + +/** + * idpf_restore_features - Restore feature configs + * @vport: virtual port structure + */ +static void idpf_restore_features(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) + idpf_restore_mac_filters(vport); +} + +/** + * idpf_set_real_num_queues - set number of queues for netdev + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure. + */ +static int idpf_set_real_num_queues(struct idpf_vport *vport) +{ + int err; + + err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); + if (err) + return err; + + return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); +} + +/** + * idpf_up_complete - Complete interface up sequence + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure. + */ +static int idpf_up_complete(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + + if (vport->link_up && !netif_carrier_ok(vport->netdev)) { + netif_carrier_on(vport->netdev); + netif_tx_start_all_queues(vport->netdev); + } + + np->state = __IDPF_VPORT_UP; + + return 0; +} + +/** + * idpf_rx_init_buf_tail - Write initial buffer ring tail value + * @vport: virtual port struct + */ +static void idpf_rx_init_buf_tail(struct idpf_vport *vport) +{ + int i, j; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_queue *q = + &grp->splitq.bufq_sets[j].bufq; + + writel(q->next_to_alloc, q->tail); + } + } else { + for (j = 0; j < grp->singleq.num_rxq; j++) { + struct idpf_queue *q = + grp->singleq.rxqs[j]; + + writel(q->next_to_alloc, q->tail); + } + } + } +} + +/** + * idpf_vport_open - Bring up a vport + * @vport: vport to bring up + * @alloc_res: allocate queue resources + */ +static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + int err; + + if (np->state != __IDPF_VPORT_DOWN) + return -EBUSY; + + /* we do not allow interface up just yet */ + netif_carrier_off(vport->netdev); + + if (alloc_res) { + err = idpf_vport_queues_alloc(vport); + if (err) + return err; + } + + err = idpf_vport_intr_alloc(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", + vport->vport_id, err); + goto queues_rel; + } + + err = idpf_vport_queue_ids_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize 
queue ids for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_vport_intr_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_rx_bufs_init_all(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_queue_reg_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + idpf_rx_init_buf_tail(vport); + + err = idpf_send_config_queues_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", + vport->vport_id, err); + goto intr_deinit; + } + + err = idpf_send_map_unmap_queue_vector_msg(vport, true); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", + vport->vport_id, err); + goto intr_deinit; + } + + err = idpf_send_enable_queues_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n", + vport->vport_id, err); + goto unmap_queue_vectors; + } + + err = idpf_send_enable_vport_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n", + vport->vport_id, err); + err = -EAGAIN; + goto disable_queues; + } + + idpf_restore_features(vport); + + vport_config = adapter->vport_config[vport->idx]; + if (vport_config->user_config.rss_data.rss_lut) + err = idpf_config_rss(vport); + else + err = idpf_init_rss(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n", + vport->vport_id, err); + goto disable_vport; + } + + err = idpf_up_complete(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n", + vport->vport_id, err); + goto deinit_rss; + } + + return 0; + +deinit_rss: + idpf_deinit_rss(vport); +disable_vport: + idpf_send_disable_vport_msg(vport); +disable_queues: + idpf_send_disable_queues_msg(vport); +unmap_queue_vectors: + idpf_send_map_unmap_queue_vector_msg(vport, false); +intr_deinit: + idpf_vport_intr_deinit(vport); +intr_rel: + idpf_vport_intr_rel(vport); +queues_rel: + idpf_vport_queues_rel(vport); + + return err; +} + +/** + * idpf_init_task - Delayed initialization task + * @work: work_struct handle to our data + * + * Init task finishes up pending work started in probe. Due to the asynchronous + * nature in which the device communicates with hardware, we may have to wait + * several milliseconds to get a response. Instead of busy polling in probe, + * pulling it out into a delayed work task prevents us from bogging down the + * whole system waiting for a response from hardware. 
+ */ +void idpf_init_task(struct work_struct *work) +{ + struct idpf_vport_config *vport_config; + struct idpf_vport_max_q max_q; + struct idpf_adapter *adapter; + struct idpf_netdev_priv *np; + struct idpf_vport *vport; + u16 num_default_vports; + struct pci_dev *pdev; + bool default_vport; + int index, err; + + adapter = container_of(work, struct idpf_adapter, init_task.work); + + num_default_vports = idpf_get_default_vports(adapter); + if (adapter->num_alloc_vports < num_default_vports) + default_vport = true; + else + default_vport = false; + + err = idpf_vport_alloc_max_qs(adapter, &max_q); + if (err) + goto unwind_vports; + + err = idpf_send_create_vport_msg(adapter, &max_q); + if (err) { + idpf_vport_dealloc_max_qs(adapter, &max_q); + goto unwind_vports; + } + + pdev = adapter->pdev; + vport = idpf_vport_alloc(adapter, &max_q); + if (!vport) { + err = -EFAULT; + dev_err(&pdev->dev, "failed to allocate vport: %d\n", + err); + idpf_vport_dealloc_max_qs(adapter, &max_q); + goto unwind_vports; + } + + index = vport->idx; + vport_config = adapter->vport_config[index]; + + init_waitqueue_head(&vport->sw_marker_wq); + init_waitqueue_head(&vport->vchnl_wq); + + mutex_init(&vport->vc_buf_lock); + spin_lock_init(&vport_config->mac_filter_list_lock); + + INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); + + err = idpf_check_supported_desc_ids(vport); + if (err) { + dev_err(&pdev->dev, "failed to get required descriptor ids\n"); + goto cfg_netdev_err; + } + + if (idpf_cfg_netdev(vport)) + goto cfg_netdev_err; + + err = idpf_send_get_rx_ptype_msg(vport); + if (err) + goto handle_err; + + /* Once state is put into DOWN, driver is ready for dev_open */ + np = netdev_priv(vport->netdev); + np->state = __IDPF_VPORT_DOWN; + if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) + idpf_vport_open(vport, true); + + /* Spawn and return 'idpf_init_task' work queue until all the + * default vports are created + */ + if (adapter->num_alloc_vports < num_default_vports) { + queue_delayed_work(adapter->init_wq, &adapter->init_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + return; + } + + for (index = 0; index < adapter->max_vports; index++) { + if (adapter->netdevs[index] && + !test_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[index]->flags)) { + register_netdev(adapter->netdevs[index]); + set_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[index]->flags); + } + } + + /* As all the required vports are created, clear the reset flag + * unconditionally here in case we were in reset and the link was down. 
+ */ + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + /* Start the statistics task now */ + queue_delayed_work(adapter->stats_wq, &adapter->stats_task, + msecs_to_jiffies(10 * (pdev->devfn & 0x07))); + + return; + +handle_err: + idpf_decfg_netdev(vport); +cfg_netdev_err: + idpf_vport_rel(vport); + adapter->vports[index] = NULL; +unwind_vports: + if (default_vport) { + for (index = 0; index < adapter->max_vports; index++) { + if (adapter->vports[index]) + idpf_vport_dealloc(adapter->vports[index]); + } + } + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); +} + +/** + * idpf_sriov_ena - Enable or change number of VFs + * @adapter: private data struct + * @num_vfs: number of VFs to allocate + */ +static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs) +{ + struct device *dev = &adapter->pdev->dev; + int err; + + err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs); + if (err) { + dev_err(dev, "Failed to allocate VFs: %d\n", err); + + return err; + } + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) { + idpf_send_set_sriov_vfs_msg(adapter, 0); + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + + return err; + } + + adapter->num_vfs = num_vfs; + + return num_vfs; +} + +/** + * idpf_sriov_configure - Configure the requested VFs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of vfs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. + **/ +int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) { + dev_info(&pdev->dev, "SR-IOV is not supported on this device\n"); + + return -EOPNOTSUPP; + } + + if (num_vfs) + return idpf_sriov_ena(adapter, num_vfs); + + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n"); + + return -EBUSY; + } + + pci_disable_sriov(adapter->pdev); + idpf_send_set_sriov_vfs_msg(adapter, 0); + adapter->num_vfs = 0; + + return 0; +} + +/** + * idpf_deinit_task - Device deinit routine + * @adapter: Driver specific private structure + * + * Extended remove logic which will be used for + * hard reset as well + */ +void idpf_deinit_task(struct idpf_adapter *adapter) +{ + unsigned int i; + + /* Wait until the init_task is done else this thread might release + * the resources first and the other thread might end up in a bad state + */ + cancel_delayed_work_sync(&adapter->init_task); + + if (!adapter->vports) + return; + + cancel_delayed_work_sync(&adapter->stats_task); + + for (i = 0; i < adapter->max_vports; i++) { + if (adapter->vports[i]) + idpf_vport_dealloc(adapter->vports[i]); + } +} + +/** + * idpf_check_reset_complete - check that reset is complete + * @hw: pointer to hw struct + * @reset_reg: struct with reset registers + * + * Returns 0 if device is ready to use, or -EBUSY if it's in reset. + **/ +static int idpf_check_reset_complete(struct idpf_hw *hw, + struct idpf_reset_reg *reset_reg) +{ + struct idpf_adapter *adapter = hw->back; + int i; + + for (i = 0; i < 2000; i++) { + u32 reg_val = readl(reset_reg->rstat); + + /* 0xFFFFFFFF might be read if other side hasn't cleared the + * register for us yet and 0xFFFFFFFF is not a valid value for + * the register, so treat that as invalid. 
+ */ + if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m)) + return 0; + + usleep_range(5000, 10000); + } + + dev_warn(&adapter->pdev->dev, "Device reset timeout!\n"); + /* Clear the reset flag unconditionally here since the reset + * technically isn't in progress anymore from the driver's perspective + */ + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + + return -EBUSY; +} + +/** + * idpf_set_vport_state - Set the vport state to be after the reset + * @adapter: Driver specific private structure + */ +static void idpf_set_vport_state(struct idpf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->max_vports; i++) { + struct idpf_netdev_priv *np; + + if (!adapter->netdevs[i]) + continue; + + np = netdev_priv(adapter->netdevs[i]); + if (np->state == __IDPF_VPORT_UP) + set_bit(IDPF_VPORT_UP_REQUESTED, + adapter->vport_config[i]->flags); + } +} + +/** + * idpf_init_hard_reset - Initiate a hardware reset + * @adapter: Driver specific private structure + * + * Deallocate the vports and all the resources associated with them and + * reallocate. Also reinitialize the mailbox. Return 0 on success, + * negative on failure. + */ +static int idpf_init_hard_reset(struct idpf_adapter *adapter) +{ + struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops; + struct device *dev = &adapter->pdev->dev; + struct net_device *netdev; + int err; + u16 i; + + mutex_lock(&adapter->vport_ctrl_lock); + + dev_info(dev, "Device HW Reset initiated\n"); + + /* Avoid TX hangs on reset */ + for (i = 0; i < adapter->max_vports; i++) { + netdev = adapter->netdevs[i]; + if (!netdev) + continue; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + } + + /* Prepare for reset */ + if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { + reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD); + } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { + bool is_reset = idpf_is_reset_detected(adapter); + + idpf_set_vport_state(adapter); + idpf_vc_core_deinit(adapter); + if (!is_reset) + reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET); + idpf_deinit_dflt_mbx(adapter); + } else { + dev_err(dev, "Unhandled hard reset cause\n"); + err = -EBADRQC; + goto unlock_mutex; + } + + /* Wait for reset to complete */ + err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg); + if (err) { + dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. 
Driver state= 0x%x\n", + adapter->state); + goto unlock_mutex; + } + + /* Reset is complete and so start building the driver resources again */ + err = idpf_init_dflt_mbx(adapter); + if (err) { + dev_err(dev, "Failed to initialize default mailbox: %d\n", err); + goto unlock_mutex; + } + + /* Initialize the state machine, also allocate memory and request + * resources + */ + err = idpf_vc_core_init(adapter); + if (err) { + idpf_deinit_dflt_mbx(adapter); + goto unlock_mutex; + } + + /* Wait till all the vports are initialized to release the reset lock, + * else user space callbacks may access uninitialized vports + */ + while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + msleep(100); + +unlock_mutex: + mutex_unlock(&adapter->vport_ctrl_lock); + + return err; +} + +/** + * idpf_vc_event_task - Handle virtchannel event logic + * @work: work queue struct + */ +void idpf_vc_event_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, vc_event_task.work); + + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + return; + + if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || + test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { + set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + idpf_init_hard_reset(adapter); + } +} + +/** + * idpf_initiate_soft_reset - Initiate a software reset + * @vport: virtual port data struct + * @reset_cause: reason for the soft reset + * + * Soft reset only reallocs vport queue resources. Returns 0 on success, + * negative on failure. + */ +int idpf_initiate_soft_reset(struct idpf_vport *vport, + enum idpf_vport_reset_cause reset_cause) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + enum idpf_vport_state current_state = np->state; + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport *new_vport; + int err, i; + + /* If the system is low on memory, we can end up in bad state if we + * free all the memory for queue resources and try to allocate them + * again. Instead, we can pre-allocate the new resources before doing + * anything and bailing if the alloc fails. + * + * Make a clone of the existing vport to mimic its current + * configuration, then modify the new structure with any requested + * changes. Once the allocation of the new resources is done, stop the + * existing vport and copy the configuration to the main vport. If an + * error occurred, the existing vport will be untouched. + * + */ + new_vport = kzalloc(sizeof(*vport), GFP_KERNEL); + if (!new_vport) + return -ENOMEM; + + /* This purposely avoids copying the end of the struct because it + * contains wait_queues and mutexes and other stuff we don't want to + * mess with. Nothing below should use those variables from new_vport + * and should instead always refer to them in vport if they need to. 
+ */ + memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state)); + + /* Adjust resource parameters prior to reallocating resources */ + switch (reset_cause) { + case IDPF_SR_Q_CHANGE: + err = idpf_vport_adjust_qs(new_vport); + if (err) + goto free_vport; + break; + case IDPF_SR_Q_DESC_CHANGE: + /* Update queue parameters before allocating resources */ + idpf_vport_calc_num_q_desc(new_vport); + break; + case IDPF_SR_MTU_CHANGE: + case IDPF_SR_RSC_CHANGE: + break; + default: + dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n"); + err = -EINVAL; + goto free_vport; + } + + err = idpf_vport_queues_alloc(new_vport); + if (err) + goto free_vport; + if (current_state <= __IDPF_VPORT_DOWN) { + idpf_send_delete_queues_msg(vport); + } else { + set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); + idpf_vport_stop(vport); + } + + idpf_deinit_rss(vport); + /* We're passing in vport here because we need its wait_queue + * to send a message and it should be getting all the vport + * config data out of the adapter but we need to be careful not + * to add code to add_queues to change the vport config within + * vport itself as it will be wiped with a memcpy later. + */ + err = idpf_send_add_queues_msg(vport, new_vport->num_txq, + new_vport->num_complq, + new_vport->num_rxq, + new_vport->num_bufq); + if (err) + goto err_reset; + + /* Same comment as above regarding avoiding copying the wait_queues and + * mutexes applies here. We do not want to mess with those if possible. + */ + memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state)); + + /* Since idpf_vport_queues_alloc was called with new_port, the queue + * back pointers are currently pointing to the local new_vport. Reset + * the backpointers to the original vport here + */ + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + int j; + + tx_qgrp->vport = vport; + for (j = 0; j < tx_qgrp->num_txq; j++) + tx_qgrp->txqs[j]->vport = vport; + + if (idpf_is_queue_model_split(vport->txq_model)) + tx_qgrp->complq->vport = vport; + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_queue *q; + u16 num_rxq; + int j; + + rx_qgrp->vport = vport; + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) + rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->vport = vport; + } + } + + if (reset_cause == IDPF_SR_Q_CHANGE) + idpf_vport_alloc_vec_indexes(vport); + + err = idpf_set_real_num_queues(vport); + if (err) + goto err_reset; + + if (current_state == __IDPF_VPORT_UP) + err = idpf_vport_open(vport, false); + + kfree(new_vport); + + return err; + +err_reset: + idpf_vport_queues_rel(new_vport); +free_vport: + kfree(new_vport); + + return err; +} + +/** + * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock + * meaning we cannot sleep in this context. Due to this, we have to add the + * filter and send the virtchnl message asynchronously without waiting for the + * response from the other side. 
We won't know whether or not the operation + * actually succeeded until we get the message back. Returns 0 on success, + * negative on failure. + */ +static int idpf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return idpf_add_mac_filter(np->vport, np, addr, true); +} + +/** + * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock + * meaning we cannot sleep in this context. Due to this we have to delete the + * filter and send the virtchnl message asynchronously without waiting for the + * return from the other side. We won't know whether or not the operation + * actually succeeded until we get the message back. Returns 0 on success, + * negative on failure. + */ +static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + idpf_del_mac_filter(np->vport, np, addr, true); + + return 0; +} + +/** + * idpf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + * + * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We + * cannot sleep in this context. + */ +static void idpf_set_rx_mode(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *config_data; + struct idpf_adapter *adapter; + bool changed = false; + struct device *dev; + int err; + + adapter = np->adapter; + dev = &adapter->pdev->dev; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) { + __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); + __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); + } + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC)) + return; + + config_data = &adapter->vport_config[np->vport_idx]->user_config; + /* IFF_PROMISC enables both unicast and multicast promiscuous, + * while IFF_ALLMULTI only enables multicast such that: + * + * promisc + allmulti = unicast | multicast + * promisc + !allmulti = unicast | multicast + * !promisc + allmulti = multicast + */ + if ((netdev->flags & IFF_PROMISC) && + !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { + changed = true; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + if (!test_and_set_bit(__IDPF_PROMISC_MC, adapter->flags)) + dev_info(dev, "Entering multicast promiscuous mode\n"); + } + + if (!(netdev->flags & IFF_PROMISC) && + test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Leaving promiscuous mode\n"); + } + + if (netdev->flags & IFF_ALLMULTI && + !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Entering multicast promiscuous mode\n"); + } + + if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) && + test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Leaving multicast promiscuous mode\n"); + } + + if 
(!changed) + return; + + err = idpf_set_promiscuous(adapter, config_data, np->vport_id); + if (err) + dev_err(dev, "Failed to set promiscuous mode: %d\n", err); +} + +/** + * idpf_vport_manage_rss_lut - disable/enable RSS + * @vport: the vport being changed + * + * In the event of disable request for RSS, this function will zero out RSS + * LUT, while in the event of enable request for RSS, it will reconfigure RSS + * LUT with the default LUT configuration. + */ +static int idpf_vport_manage_rss_lut(struct idpf_vport *vport) +{ + bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); + struct idpf_rss_data *rss_data; + u16 idx = vport->idx; + int lut_size; + + rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data; + lut_size = rss_data->rss_lut_size * sizeof(u32); + + if (ena) { + /* This will contain the default or user configured LUT */ + memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size); + } else { + /* Save a copy of the current LUT to be restored later if + * requested. + */ + memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size); + + /* Zero out the current LUT to disable */ + memset(rss_data->rss_lut, 0, lut_size); + } + + return idpf_config_rss(vport); +} + +/** + * idpf_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + */ +static int idpf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct idpf_adapter *adapter; + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + adapter = vport->adapter; + + if (idpf_is_reset_in_prog(adapter)) { + dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n"); + err = -EBUSY; + goto unlock_mutex; + } + + if (changed & NETIF_F_RXHASH) { + netdev->features ^= NETIF_F_RXHASH; + err = idpf_vport_manage_rss_lut(vport); + if (err) + goto unlock_mutex; + } + + if (changed & NETIF_F_GRO_HW) { + netdev->features ^= NETIF_F_GRO_HW; + err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE); + if (err) + goto unlock_mutex; + } + + if (changed & NETIF_F_LOOPBACK) { + netdev->features ^= NETIF_F_LOOPBACK; + err = idpf_send_ena_dis_loopback_msg(vport); + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_open - Called when a network interface becomes active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the netdev watchdog is enabled, + * and the stack is notified that the interface is ready. 
 + * + * Returns 0 on success, negative value on failure + */ +static int idpf_open(struct net_device *netdev) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + err = idpf_vport_open(vport, true); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_change_mtu - NDO callback to change the MTU + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int idpf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + netdev->mtu = new_mtu; + + err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_features_check - Validate packet conforms to limits + * @skb: skb buffer + * @netdev: This port's netdev + * @features: Offload features that the stack believes apply + */ +static netdev_features_t idpf_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_adapter *adapter = vport->adapter; + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 88 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && + (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)) + features &= ~NETIF_F_GSO_MASK; + + /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */ + len = skb_network_offset(skb); + if (unlikely(len & ~(126))) + goto unsupported; + + len = skb_network_header_len(skb); + if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + goto unsupported; + + if (!skb->encapsulation) + return features; + + /* L4TUNLEN can support 127 words */ + len = skb_inner_network_header(skb) - skb_transport_header(skb); + if (unlikely(len & ~(127 * 2))) + goto unsupported; + + /* IPLEN can support at most 127 dwords */ + len = skb_inner_network_header_len(skb); + if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + goto unsupported; + + /* No need to validate L4LEN as TCP is the only protocol with a + * flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +unsupported: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * idpf_set_mac - NDO callback to set port MAC address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int idpf_set_mac(struct net_device *netdev, void *p) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + struct sockaddr *addr = p; + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_MACFILTER)) { + dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n"); + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + if (!is_valid_ether_addr(addr->sa_data)) { + dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n", + 
addr->sa_data); + err = -EADDRNOTAVAIL; + goto unlock_mutex; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) + goto unlock_mutex; + + vport_config = vport->adapter->vport_config[vport->idx]; + err = idpf_add_mac_filter(vport, np, addr->sa_data, false); + if (err) { + __idpf_del_mac_filter(vport_config, addr->sa_data); + goto unlock_mutex; + } + + if (is_valid_ether_addr(vport->default_mac_addr)) + idpf_del_mac_filter(vport, np, vport->default_mac_addr, false); + + ether_addr_copy(vport->default_mac_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_alloc_dma_mem - Allocate dma memory + * @hw: pointer to hw struct + * @mem: pointer to dma_mem struct + * @size: size of the memory to allocate + */ +void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) +{ + struct idpf_adapter *adapter = hw->back; + size_t sz = ALIGN(size, 4096); + + mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, + &mem->pa, GFP_KERNEL); + mem->size = sz; + + return mem->va; +} + +/** + * idpf_free_dma_mem - Free the allocated dma memory + * @hw: pointer to hw struct + * @mem: pointer to dma_mem struct + */ +void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) +{ + struct idpf_adapter *adapter = hw->back; + + dma_free_coherent(&adapter->pdev->dev, mem->size, + mem->va, mem->pa); + mem->size = 0; + mem->va = NULL; + mem->pa = 0; +} + +static const struct net_device_ops idpf_netdev_ops_splitq = { + .ndo_open = idpf_open, + .ndo_stop = idpf_stop, + .ndo_start_xmit = idpf_tx_splitq_start, + .ndo_features_check = idpf_features_check, + .ndo_set_rx_mode = idpf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = idpf_set_mac, + .ndo_change_mtu = idpf_change_mtu, + .ndo_get_stats64 = idpf_get_stats64, + .ndo_set_features = idpf_set_features, + .ndo_tx_timeout = idpf_tx_timeout, +}; + +static const struct net_device_ops idpf_netdev_ops_singleq = { + .ndo_open = idpf_open, + .ndo_stop = idpf_stop, + .ndo_start_xmit = idpf_tx_singleq_start, + .ndo_features_check = idpf_features_check, + .ndo_set_rx_mode = idpf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = idpf_set_mac, + .ndo_change_mtu = idpf_change_mtu, + .ndo_get_stats64 = idpf_get_stats64, + .ndo_set_features = idpf_set_features, + .ndo_tx_timeout = idpf_tx_timeout, +}; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c new file mode 100644 index 0000000000..e1febc74ce --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_devids.h" + +#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" + +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); + +/** + * idpf_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void idpf_remove(struct pci_dev *pdev) +{ + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + int i; + + set_bit(IDPF_REMOVE_IN_PROG, adapter->flags); + + /* Wait until vc_event_task is done to consider if any hard reset is + * in progress else we may go ahead and release the resources but the + * thread doing the hard reset might continue the init path and + * end up in bad state. 
+ */ + cancel_delayed_work_sync(&adapter->vc_event_task); + if (adapter->num_vfs) + idpf_sriov_configure(pdev, 0); + + idpf_vc_core_deinit(adapter); + /* Be a good citizen and leave the device clean on exit */ + adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET); + idpf_deinit_dflt_mbx(adapter); + + if (!adapter->netdevs) + goto destroy_wqs; + + /* There are some cases where it's possible to still have netdevs + * registered with the stack at this point, e.g. if the driver detected + * a HW reset and rmmod is called before it fully recovers. Unregister + * any stale netdevs here. + */ + for (i = 0; i < adapter->max_vports; i++) { + if (!adapter->netdevs[i]) + continue; + if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(adapter->netdevs[i]); + free_netdev(adapter->netdevs[i]); + adapter->netdevs[i] = NULL; + } + +destroy_wqs: + destroy_workqueue(adapter->init_wq); + destroy_workqueue(adapter->serv_wq); + destroy_workqueue(adapter->mbx_wq); + destroy_workqueue(adapter->stats_wq); + destroy_workqueue(adapter->vc_event_wq); + + for (i = 0; i < adapter->max_vports; i++) { + kfree(adapter->vport_config[i]); + adapter->vport_config[i] = NULL; + } + kfree(adapter->vport_config); + adapter->vport_config = NULL; + kfree(adapter->netdevs); + adapter->netdevs = NULL; + + mutex_destroy(&adapter->vport_ctrl_lock); + mutex_destroy(&adapter->vector_lock); + mutex_destroy(&adapter->queue_lock); + mutex_destroy(&adapter->vc_buf_lock); + + pci_set_drvdata(pdev, NULL); + kfree(adapter); +} + +/** + * idpf_shutdown - PCI callback for shutting down device + * @pdev: PCI device information struct + */ +static void idpf_shutdown(struct pci_dev *pdev) +{ + idpf_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +/** + * idpf_cfg_hw - Initialize HW struct + * @adapter: adapter to setup hw struct for + * + * Returns 0 on success, negative on failure + */ +static int idpf_cfg_hw(struct idpf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct idpf_hw *hw = &adapter->hw; + + hw->hw_addr = pcim_iomap_table(pdev)[0]; + if (!hw->hw_addr) { + pci_err(pdev, "failed to allocate PCI iomap table\n"); + + return -ENOMEM; + } + + hw->back = adapter; + + return 0; +} + +/** + * idpf_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in idpf_pci_tbl + * + * Returns 0 on success, negative on failure + */ +static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct idpf_adapter *adapter; + int err; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) + return -ENOMEM; + + adapter->req_tx_splitq = true; + adapter->req_rx_splitq = true; + + switch (ent->device) { + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + default: + err = -ENODEV; + dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", + ent->device); + goto err_free; + } + + adapter->pdev = pdev; + err = pcim_enable_device(pdev); + if (err) + goto err_free; + + err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (err) { + pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err)); + + goto err_free; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err)); + + goto err_free; + } + + 
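 + /* Enable PCI bus mastering so the device can initiate DMA, and stash the + * adapter pointer so the PCI remove/shutdown callbacks can retrieve it + * via pci_get_drvdata(). + */ +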
pci_set_master(pdev); + pci_set_drvdata(pdev, adapter); + + adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->init_wq) { + dev_err(dev, "Failed to allocate init workqueue\n"); + err = -ENOMEM; + goto err_free; + } + + adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->serv_wq) { + dev_err(dev, "Failed to allocate service workqueue\n"); + err = -ENOMEM; + goto err_serv_wq_alloc; + } + + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->mbx_wq) { + dev_err(dev, "Failed to allocate mailbox workqueue\n"); + err = -ENOMEM; + goto err_mbx_wq_alloc; + } + + adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->stats_wq) { + dev_err(dev, "Failed to allocate workqueue\n"); + err = -ENOMEM; + goto err_stats_wq_alloc; + } + + adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->vc_event_wq) { + dev_err(dev, "Failed to allocate virtchnl event workqueue\n"); + err = -ENOMEM; + goto err_vc_event_wq_alloc; + } + + /* setup msglvl */ + adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M); + + err = idpf_cfg_hw(adapter); + if (err) { + dev_err(dev, "Failed to configure HW structure for adapter: %d\n", + err); + goto err_cfg_hw; + } + + mutex_init(&adapter->vport_ctrl_lock); + mutex_init(&adapter->vector_lock); + mutex_init(&adapter->queue_lock); + mutex_init(&adapter->vc_buf_lock); + + init_waitqueue_head(&adapter->vchnl_wq); + + INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task); + INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task); + INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task); + INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task); + INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task); + + adapter->dev_ops.reg_ops.reset_reg_init(adapter); + set_bit(IDPF_HR_DRV_LOAD, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, + msecs_to_jiffies(10 * (pdev->devfn & 0x07))); + + return 0; + +err_cfg_hw: + destroy_workqueue(adapter->vc_event_wq); +err_vc_event_wq_alloc: + destroy_workqueue(adapter->stats_wq); +err_stats_wq_alloc: + destroy_workqueue(adapter->mbx_wq); +err_mbx_wq_alloc: + destroy_workqueue(adapter->serv_wq); +err_serv_wq_alloc: + destroy_workqueue(adapter->init_wq); +err_free: + kfree(adapter); + return err; +} + +/* idpf_pci_tbl - PCI Dev idpf ID Table + */ +static const struct pci_device_id idpf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)}, + { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(pci, idpf_pci_tbl); + +static struct pci_driver idpf_driver = { + .name = KBUILD_MODNAME, + .id_table = idpf_pci_tbl, + .probe = idpf_probe, + .sriov_configure = idpf_sriov_configure, + .remove = idpf_remove, + .shutdown = idpf_shutdown, +}; +module_pci_driver(idpf_driver); diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h new file mode 100644 index 0000000000..b21a04fccf --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_MEM_H_ +#define _IDPF_MEM_H_ + +#include + +struct idpf_dma_mem { + void *va; + dma_addr_t pa; + size_t size; +}; + +#define wr32(a, reg, value) writel((value), 
((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) + +#endif /* _IDPF_MEM_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c new file mode 100644 index 0000000000..20c4b3a647 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -0,0 +1,1182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_tx_singleq_csum - Enable tx checksum offloads + * @skb: pointer to skb + * @off: pointer to struct that holds offload parameters + * + * Returns 0 or error (negative) if checksum offload cannot be executed, 1 + * otherwise. + */ +static int idpf_tx_singleq_csum(struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + u32 l4_len, l3_len, l2_len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 offset, cmd = 0; + u8 l4_proto = 0; + __be16 frag_off; + bool is_tso; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + l2_len = ip.hdr - skb->data; + offset = FIELD_PREP(0x3F << IDPF_TX_DESC_LEN_MACLEN_S, l2_len / 2); + is_tso = !!(off->tx_flags & IDPF_TX_FLAGS_TSO); + if (skb->encapsulation) { + u32 tunnel = 0; + + /* define outer network header type */ + if (off->tx_flags & IDPF_TX_FLAGS_IPV4) { + /* The stack computes the IP header already, the only + * time we need the hardware to recompute it is in the + * case of TSO. + */ + tunnel |= is_tso ? 
+ IDPF_TX_CTX_EXT_IP_IPV4 : + IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) { + tunnel |= IDPF_TX_CTX_EXT_IP_IPV6; + + l4_proto = ip.v6->nexthdr; + if (ipv6_ext_hdr(l4_proto)) + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(*ip.v6), + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + tunnel |= IDPF_TXD_CTX_UDP_TUNNELING; + break; + case IPPROTO_GRE: + tunnel |= IDPF_TXD_CTX_GRE_TUNNELING; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (is_tso) + return -1; + + skb_checksum_help(skb); + + return 0; + } + off->tx_flags |= IDPF_TX_FLAGS_TUNNEL; + + /* compute outer L3 header size */ + tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M, + (l4.hdr - ip.hdr) / 4); + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* compute tunnel header size */ + tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_NATLEN_M, + (ip.hdr - l4.hdr) / 2); + + /* indicate if we need to offload outer UDP header */ + if (is_tso && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M; + + /* record tunnel offload values */ + off->cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + off->tx_flags &= ~(IDPF_TX_FLAGS_IPV4 | IDPF_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + off->tx_flags |= IDPF_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + off->tx_flags |= IDPF_TX_FLAGS_IPV6; + } + + /* Enable IP checksum offloads */ + if (off->tx_flags & IDPF_TX_FLAGS_IPV4) { + l4_proto = ip.v4->protocol; + /* See comment above regarding need for HW to recompute IP + * header checksum in the case of TSO. 
+ */ + if (is_tso) + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM; + else + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4; + + } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) { + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV6; + l4_proto = ip.v6->nexthdr; + if (ipv6_ext_hdr(l4_proto)) + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(*ip.v6), &l4_proto, + &frag_off); + } else { + return -1; + } + + /* compute inner L3 header size */ + l3_len = l4.hdr - ip.hdr; + offset |= (l3_len / 4) << IDPF_TX_DESC_LEN_IPLEN_S; + + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + /* enable checksum offloads */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_TCP; + l4_len = l4.tcp->doff; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_UDP; + l4_len = sizeof(struct udphdr) >> 2; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_SCTP; + l4_len = sizeof(struct sctphdr) >> 2; + break; + default: + if (is_tso) + return -1; + + skb_checksum_help(skb); + + return 0; + } + + offset |= l4_len << IDPF_TX_DESC_LEN_L4_LEN_S; + off->td_cmd |= cmd; + off->hdr_offsets |= offset; + + return 1; +} + +/** + * idpf_tx_singleq_map - Build the Tx base descriptor + * @tx_q: queue to send buffer on + * @first: first buffer info buffer to use + * @offloads: pointer to struct that holds offload parameters + * + * This function loops over the skb data pointed to by *first + * and gets a physical address for each memory location and programs + * it and the length into the transmit base mode descriptor. + */ +static void idpf_tx_singleq_map(struct idpf_queue *tx_q, + struct idpf_tx_buf *first, + struct idpf_tx_offload_params *offloads) +{ + u32 offsets = offloads->hdr_offsets; + struct idpf_tx_buf *tx_buf = first; + struct idpf_base_tx_desc *tx_desc; + struct sk_buff *skb = first->skb; + u64 td_cmd = offloads->td_cmd; + unsigned int data_len, size; + u16 i = tx_q->next_to_use; + struct netdev_queue *nq; + skb_frag_t *frag; + dma_addr_t dma; + u64 td_tag = 0; + + data_len = skb->data_len; + size = skb_headlen(skb); + + tx_desc = IDPF_BASE_TX_DESC(tx_q, i); + + dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); + + /* write each descriptor with CRC bit */ + if (tx_q->vport->crc_enable) + td_cmd |= IDPF_TX_DESC_CMD_ICRC; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + if (dma_mapping_error(tx_q->dev, dma)) + return idpf_tx_dma_map_error(tx_q, skb, first, i); + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + /* align size to end of page */ + max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); + tx_desc->buf_addr = cpu_to_le64(dma); + + /* account for data chunks larger than the hardware + * can handle + */ + while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, + offsets, + max_data, + td_tag); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + i = 0; + } + + dma += max_data; + size -= max_data; + + max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + tx_desc->buf_addr = cpu_to_le64(dma); + } + + if (!data_len) + break; + + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, + size, td_tag); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, + 
DMA_TO_DEVICE); + + tx_buf = &tx_q->tx_buf[i]; + } + + skb_tx_timestamp(first->skb); + + /* write last descriptor with RS and EOP bits */ + td_cmd |= (u64)(IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS); + + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, + size, td_tag); + + IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + netdev_tx_sent_queue(nq, first->bytecount); + + idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); +} + +/** + * idpf_tx_singleq_get_ctx_desc - grab next desc and update buffer ring + * @txq: queue to put context descriptor on + * + * Since the TX buffer rings mimics the descriptor ring, update the tx buffer + * ring entry to reflect that this index is a context descriptor + */ +static struct idpf_base_tx_ctx_desc * +idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) +{ + struct idpf_base_tx_ctx_desc *ctx_desc; + int ntu = txq->next_to_use; + + memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); + txq->tx_buf[ntu].ctx_entry = true; + + ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); + + IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); + txq->next_to_use = ntu; + + return ctx_desc; +} + +/** + * idpf_tx_singleq_build_ctx_desc - populate context descriptor + * @txq: queue to send buffer on + * @offload: offload parameter structure + **/ +static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, + struct idpf_tx_offload_params *offload) +{ + struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); + u64 qw1 = (u64)IDPF_TX_DESC_DTYPE_CTX; + + if (offload->tso_segs) { + qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S; + qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) & + IDPF_TXD_CTX_QW1_TSO_LEN_M; + qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) & + IDPF_TXD_CTX_QW1_MSS_M; + + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.lso_pkts); + u64_stats_update_end(&txq->stats_sync); + } + + desc->qw0.tunneling_params = cpu_to_le32(offload->cd_tunneling); + + desc->qw0.l2tag2 = 0; + desc->qw0.rsvd1 = 0; + desc->qw1 = cpu_to_le64(qw1); +} + +/** + * idpf_tx_singleq_frame - Sends buffer on Tx ring using base descriptors + * @skb: send buffer + * @tx_q: queue to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_queue *tx_q) +{ + struct idpf_tx_offload_params offload = { }; + struct idpf_tx_buf *first; + unsigned int count; + __be16 protocol; + int csum, tso; + + count = idpf_tx_desc_count_required(tx_q, skb); + if (unlikely(!count)) + return idpf_tx_drop_skb(tx_q, skb); + + if (idpf_tx_maybe_stop_common(tx_q, + count + IDPF_TX_DESCS_PER_CACHE_LINE + + IDPF_TX_DESCS_FOR_CTX)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_BUSY; + } + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IP)) + offload.tx_flags |= IDPF_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + offload.tx_flags |= IDPF_TX_FLAGS_IPV6; + + tso = idpf_tso(skb, &offload); + if (tso < 0) + goto out_drop; + + csum = idpf_tx_singleq_csum(skb, &offload); + if (csum < 0) + goto out_drop; + + if (tso || offload.cd_tunneling) + idpf_tx_singleq_build_ctx_desc(tx_q, &offload); + + /* record the location of the first descriptor for this packet */ + first = &tx_q->tx_buf[tx_q->next_to_use]; + first->skb = skb; + + if (tso) { + first->gso_segs = 
offload.tso_segs; + first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len); + } else { + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->gso_segs = 1; + } + idpf_tx_singleq_map(tx_q, first, &offload); + + return NETDEV_TX_OK; + +out_drop: + return idpf_tx_drop_skb(tx_q, skb); +} + +/** + * idpf_tx_singleq_start - Selects the right Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, + struct net_device *netdev) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_queue *tx_q; + + tx_q = vport->txqs[skb_get_queue_mapping(skb)]; + + /* hardware can't handle really short frames; hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_OK; + } + + return idpf_tx_singleq_frame(skb, tx_q); +} + +/** + * idpf_tx_singleq_clean - Reclaim resources from queue + * @tx_q: Tx queue to clean + * @napi_budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, + int *cleaned) +{ + unsigned int budget = tx_q->vport->compln_clean_budget; + unsigned int total_bytes = 0, total_pkts = 0; + struct idpf_base_tx_desc *tx_desc; + s16 ntc = tx_q->next_to_clean; + struct idpf_netdev_priv *np; + struct idpf_tx_buf *tx_buf; + struct idpf_vport *vport; + struct netdev_queue *nq; + bool dont_wake; + + tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); + tx_buf = &tx_q->tx_buf[ntc]; + ntc -= tx_q->desc_count; + + do { + struct idpf_base_tx_desc *eop_desc; + + /* If this entry in the ring was used as a context descriptor, + * its corresponding entry in the buffer ring will indicate as + * such. We can skip this descriptor since there is no buffer + * to clean. + */ + if (tx_buf->ctx_entry) { + /* Clear this flag here to avoid stale flag values when + * this buffer is used for actual data in the future. + * There are cases where the tx_buf struct / the flags + * field will not be cleared before being reused. 
+ */ + tx_buf->ctx_entry = false; + goto fetch_next_txq_desc; + } + + /* if next_to_watch is not set then no work pending */ + eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch; + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->qw1 & + cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_pkts += tx_buf->gso_segs; + + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buf data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= tx_q->desc_count; + tx_buf = tx_q->tx_buf; + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* update budget only if we did something */ + budget--; + +fetch_next_txq_desc: + tx_buf++; + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= tx_q->desc_count; + tx_buf = tx_q->tx_buf; + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + } + } while (likely(budget)); + + ntc += tx_q->desc_count; + tx_q->next_to_clean = ntc; + + *cleaned += total_pkts; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); + u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); + u64_stats_update_end(&tx_q->stats_sync); + + vport = tx_q->vport; + np = netdev_priv(vport->netdev); + nq = netdev_get_tx_queue(vport->netdev, tx_q->idx); + + dont_wake = np->state != __IDPF_VPORT_UP || + !netif_carrier_ok(vport->netdev); + __netif_txq_completed_wake(nq, total_pkts, total_bytes, + IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, + dont_wake); + + return !!budget; +} + +/** + * idpf_tx_singleq_clean_all - Clean all Tx queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_txq = q_vec->num_txq; + bool clean_complete = true; + int i, budget_per_q; + + budget_per_q = num_txq ? max(budget / num_txq, 1) : 0; + for (i = 0; i < num_txq; i++) { + struct idpf_queue *q; + + q = q_vec->tx[i]; + clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, + cleaned); + } + + return clean_complete; +} + +/** + * idpf_rx_singleq_test_staterr - tests bits in Rx descriptor + * status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_ptype_len doesn't need to be shifted because it begins + * at offset zero. 
+ */ +static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->base_wb.qword1.status_error_ptype_len & + cpu_to_le64(stat_err_bits)); +} + +/** + * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers + * @rxq: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * @ntc: next to clean + */ +static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, + union virtchnl2_rx_desc *rx_desc, + struct sk_buff *skb, u16 ntc) +{ + /* if we are the last buffer then there is nothing else to do */ + if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) + return false; + + return true; +} + +/** + * idpf_rx_singleq_csum - Indicate in skb if checksum is good + * @rxq: Rx ring being processed + * @skb: skb currently being received and modified + * @csum_bits: checksum bits from descriptor + * @ptype: the packet type decoded by hardware + * + * skb->protocol must be set before this function is called + */ +static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded *csum_bits, + u16 ptype) +{ + struct idpf_rx_ptype_decoded decoded; + bool ipv4, ipv6; + + /* check if Rx checksum is enabled */ + if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) + return; + + /* check if HW has decoded the packet and checksum */ + if (unlikely(!(csum_bits->l3l4p))) + return; + + decoded = rxq->vport->rx_ptype_lkup[ptype]; + if (unlikely(!(decoded.known && decoded.outer_ip))) + return; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + /* Check if there were any checksum errors */ + if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) + goto checksum_fail; + + /* Device could not do any checksum offload for certain extension + * headers as indicated by setting IPV6EXADD bit + */ + if (unlikely(ipv6 && csum_bits->ipv6exadd)) + return; + + /* check for L4 errors and handle packets that were not able to be + * checksummed due to arrival speed + */ + if (unlikely(csum_bits->l4e)) + goto checksum_fail; + + if (unlikely(csum_bits->nat && csum_bits->eudpe)) + goto checksum_fail; + + /* Handle packets that were not able to be checksummed due to arrival + * speed, in this case the stack can compute the csum. + */ + if (unlikely(csum_bits->pprs)) + return; + + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) + skb->csum_level = 1; + + /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ + switch (decoded.inner_prot) { + case IDPF_RX_PTYPE_INNER_PROT_ICMP: + case IDPF_RX_PTYPE_INNER_PROT_TCP: + case IDPF_RX_PTYPE_INNER_PROT_UDP: + case IDPF_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + default: + return; + } + +checksum_fail: + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_update_end(&rxq->stats_sync); +} + +/** + * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: Rx packet type + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + **/ +static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_csum_decoded csum_bits; + u32 rx_error, rx_status; + u64 qword; + + qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + rx_status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qword); + rx_error = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, qword); + + csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M, rx_error); + csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M, + rx_error); + csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M, rx_error); + csum_bits.pprs = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M, + rx_error); + csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M, + rx_status); + csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, + rx_status); + csum_bits.nat = 0; + csum_bits.eudpe = 0; + + idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); +} + +/** + * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: Rx packet type + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. 
+ **/ +static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_csum_decoded csum_bits; + u16 rx_status0, rx_status1; + + rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->flex_nic_wb.status_error1); + + csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M, + rx_status0); + csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M, + rx_status0); + csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M, + rx_status0); + csum_bits.eudpe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M, + rx_status0); + csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M, + rx_status0); + csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M, + rx_status0); + csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, + rx_status1); + csum_bits.pprs = 0; + + idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); +} + +/** + * idpf_rx_singleq_base_hash - set the hash value in the skb + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: specific descriptor + * @decoded: Decoded Rx packet type related fields + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + **/ +static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u64 mask, qw1; + + if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + return; + + mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; + qw1 = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + if (FIELD_GET(mask, qw1) == mask) { + u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); + + skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + } +} + +/** + * idpf_rx_singleq_flex_hash - set the hash value in the skb + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: specific descriptor + * @decoded: Decoded Rx packet type related fields + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. + **/ +static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + return; + + if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, + le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) + skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), + idpf_ptype_to_htype(decoded)); +} + +/** + * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx + * descriptor + * @rx_q: Rx ring being processed + * @skb: pointer to current skb being populated + * @rx_desc: descriptor for skb + * @ptype: packet type + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. 
+ */ +static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_ptype_decoded decoded = + rx_q->vport->rx_ptype_lkup[ptype]; + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); + + /* Check if we're using base mode descriptor IDs */ + if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { + idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); + idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); + } else { + idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); + idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); + } +} + +/** + * idpf_rx_singleq_buf_hw_alloc_all - Replace used receive buffers + * @rx_q: queue for which the hw buffers are allocated + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + */ +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, + u16 cleaned_count) +{ + struct virtchnl2_singleq_rx_buf_desc *desc; + u16 nta = rx_q->next_to_alloc; + struct idpf_rx_buf *buf; + + if (!cleaned_count) + return false; + + desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); + buf = &rx_q->rx_buf.buf[nta]; + + do { + dma_addr_t addr; + + addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + desc->pkt_addr = cpu_to_le64(addr); + desc->hdr_addr = 0; + desc++; + + buf++; + nta++; + if (unlikely(nta == rx_q->desc_count)) { + desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); + buf = rx_q->rx_buf.buf; + nta = 0; + } + + cleaned_count--; + } while (cleaned_count); + + if (rx_q->next_to_alloc != nta) { + idpf_rx_buf_hw_update(rx_q, nta); + rx_q->next_to_alloc = nta; + } + + return !!cleaned_count; +} + +/** + * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + * Decode the Rx descriptor and extract relevant information including the + * size and Rx packet type. + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + */ +static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + u64 qword; + + qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); + fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); +} + +/** + * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + * Decode the Rx descriptor and extract relevant information including the + * size and Rx packet type. + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. 
+ */ +static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, + le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); + fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, + le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); +} + +/** + * idpf_rx_singleq_extract_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + */ +static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) + idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); + else + idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); +} + +/** + * idpf_rx_singleq_clean - Reclaim resources after receive completes + * @rx_q: rx queue to clean + * @budget: Total limit on number of packets to process + * + * Returns true if there's any budget left (e.g. the clean is finished) + */ +static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_pkts = 0; + struct sk_buff *skb = rx_q->skb; + u16 ntc = rx_q->next_to_clean; + u16 cleaned_count = 0; + bool failure = false; + + /* Process Rx packets bounded by budget */ + while (likely(total_rx_pkts < (unsigned int)budget)) { + struct idpf_rx_extracted fields = { }; + union virtchnl2_rx_desc *rx_desc; + struct idpf_rx_buf *rx_buf; + + /* get the Rx desc from Rx queue based on 'next_to_clean' */ + rx_desc = IDPF_RX_DESC(rx_q, ntc); + + /* status_error_ptype_len will always be zero for unused + * descriptors because it's cleared in cleanup, and overlaps + * with hdr_addr which is always zero because packet split + * isn't used, if the hardware wrote DD then the length will be + * non-zero + */ +#define IDPF_RXD_DD VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M + if (!idpf_rx_singleq_test_staterr(rx_desc, + IDPF_RXD_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc + */ + dma_rmb(); + + idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); + + rx_buf = &rx_q->rx_buf.buf[ntc]; + if (!fields.size) { + idpf_rx_put_page(rx_buf); + goto skip_data; + } + + idpf_rx_sync_for_cpu(rx_buf, fields.size); + if (skb) + idpf_rx_add_frag(rx_buf, skb, fields.size); + else + skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + +skip_data: + IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); + + cleaned_count++; + + /* skip if it is non EOP desc */ + if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) + continue; + +#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ + VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M) + if (unlikely(idpf_rx_singleq_test_staterr(rx_desc, + IDPF_RXD_ERR_S))) { + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + /* pad skb if needed (to make valid ethernet frame) */ + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* protocol */ + idpf_rx_singleq_process_skb_fields(rx_q, skb, + rx_desc, fields.rx_ptype); + + /* send completed skb up the stack */ + napi_gro_receive(&rx_q->q_vector->napi, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_pkts++; + } + + rx_q->skb = skb; + + 
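 /* Publish the updated clean index, return any buffers we consumed back to + * hardware, and fold the packet/byte totals into the queue stats. + */ +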
rx_q->next_to_clean = ntc; + + if (cleaned_count) + failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); + + u64_stats_update_begin(&rx_q->stats_sync); + u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); + u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); + u64_stats_update_end(&rx_q->stats_sync); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? budget : (int)total_rx_pkts; +} + +/** + * idpf_rx_singleq_clean_all - Clean all Rx queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_rxq = q_vec->num_rxq; + bool clean_complete = true; + int budget_per_q, i; + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. + */ + budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; + for (i = 0; i < num_rxq; i++) { + struct idpf_queue *rxq = q_vec->rx[i]; + int pkts_cleaned_per_q; + + pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); + + /* if we clean as many as budgeted, we must not be done */ + if (pkts_cleaned_per_q >= budget_per_q) + clean_complete = false; + *cleaned += pkts_cleaned_per_q; + } + + return clean_complete; +} + +/** + * idpf_vport_singleq_napi_poll - NAPI handler + * @napi: struct from which you get q_vector + * @budget: budget provided by stack + */ +int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget) +{ + struct idpf_q_vector *q_vector = + container_of(napi, struct idpf_q_vector, napi); + bool clean_complete; + int work_done = 0; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) { + idpf_tx_singleq_clean_all(q_vector, budget, &work_done); + + return budget; + } + + clean_complete = idpf_rx_singleq_clean_all(q_vector, budget, + &work_done); + clean_complete &= idpf_tx_singleq_clean_all(q_vector, budget, + &work_done); + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) + return budget; + + work_done = min_t(int, work_done, budget - 1); + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + idpf_vport_intr_update_itr_ena_irq(q_vector); + + return work_done; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c new file mode 100644 index 0000000000..9e942e5baf --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -0,0 +1,4294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_buf_lifo_push - push a buffer pointer onto stack + * @stack: pointer to stack struct + * @buf: pointer to buf to push + * + * Returns 0 on success, negative on failure + **/ +static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, + struct idpf_tx_stash *buf) +{ + if (unlikely(stack->top == stack->size)) + return -ENOSPC; + + stack->bufs[stack->top++] = buf; + + return 0; +} + +/** + * idpf_buf_lifo_pop - pop a buffer pointer from stack + * @stack: pointer to stack struct + **/ +static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) +{ + if (unlikely(!stack->top)) + return NULL; + + return stack->bufs[--stack->top]; 
+} + +/** + * idpf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: TX queue + */ +void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + adapter->tx_timeout_count++; + + netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n", + adapter->tx_timeout_count, txqueue); + if (!idpf_is_reset_in_prog(adapter)) { + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } +} + +/** + * idpf_tx_buf_rel - Release a Tx buffer + * @tx_q: the queue that owns the buffer + * @tx_buf: the buffer to free + */ +static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) +{ + if (tx_buf->skb) { + if (dma_unmap_len(tx_buf, len)) + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dev_kfree_skb_any(tx_buf->skb); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + dma_unmap_len_set(tx_buf, len, 0); +} + +/** + * idpf_tx_buf_rel_all - Free any empty Tx buffers + * @txq: queue to be cleaned + */ +static void idpf_tx_buf_rel_all(struct idpf_queue *txq) +{ + u16 i; + + /* Buffers already cleared, nothing to do */ + if (!txq->tx_buf) + return; + + /* Free all the Tx buffer sk_buffs */ + for (i = 0; i < txq->desc_count; i++) + idpf_tx_buf_rel(txq, &txq->tx_buf[i]); + + kfree(txq->tx_buf); + txq->tx_buf = NULL; + + if (!txq->buf_stack.bufs) + return; + + for (i = 0; i < txq->buf_stack.size; i++) + kfree(txq->buf_stack.bufs[i]); + + kfree(txq->buf_stack.bufs); + txq->buf_stack.bufs = NULL; +} + +/** + * idpf_tx_desc_rel - Free Tx resources per queue + * @txq: Tx descriptor ring for a specific queue + * @bufq: buffer q or completion q + * + * Free all transmit software resources + */ +static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) +{ + if (bufq) + idpf_tx_buf_rel_all(txq); + + if (!txq->desc_ring) + return; + + dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); + txq->desc_ring = NULL; + txq->next_to_alloc = 0; + txq->next_to_use = 0; + txq->next_to_clean = 0; +} + +/** + * idpf_tx_desc_rel_all - Free Tx Resources for All Queues + * @vport: virtual port structure + * + * Free all transmit software resources + */ +static void idpf_tx_desc_rel_all(struct idpf_vport *vport) +{ + int i, j; + + if (!vport->txq_grps) + return; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) + idpf_tx_desc_rel(txq_grp->txqs[j], true); + + if (idpf_is_queue_model_split(vport->txq_model)) + idpf_tx_desc_rel(txq_grp->complq, false); + } +} + +/** + * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources + * @tx_q: queue for which the buffers are allocated + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) +{ + int buf_size; + int i; + + /* Allocate book keeping buffers only. 
Buffers to be supplied to HW + * are allocated by kernel network stack and received as part of skb + */ + buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; + tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); + if (!tx_q->tx_buf) + return -ENOMEM; + + /* Initialize tx_bufs with invalid completion tags */ + for (i = 0; i < tx_q->desc_count; i++) + tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + /* Initialize tx buf stack for out-of-order completions if + * flow scheduling offload is enabled + */ + tx_q->buf_stack.bufs = + kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), + GFP_KERNEL); + if (!tx_q->buf_stack.bufs) + return -ENOMEM; + + tx_q->buf_stack.size = tx_q->desc_count; + tx_q->buf_stack.top = tx_q->desc_count; + + for (i = 0; i < tx_q->desc_count; i++) { + tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), + GFP_KERNEL); + if (!tx_q->buf_stack.bufs[i]) + return -ENOMEM; + } + + return 0; +} + +/** + * idpf_tx_desc_alloc - Allocate the Tx descriptors + * @tx_q: the tx ring to set up + * @bufq: buffer or completion queue + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) +{ + struct device *dev = tx_q->dev; + u32 desc_sz; + int err; + + if (bufq) { + err = idpf_tx_buf_alloc_all(tx_q); + if (err) + goto err_alloc; + + desc_sz = sizeof(struct idpf_base_tx_desc); + } else { + desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); + } + + tx_q->size = tx_q->desc_count * desc_sz; + + /* Allocate descriptors also round up to nearest 4K */ + tx_q->size = ALIGN(tx_q->size, 4096); + tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, + GFP_KERNEL); + if (!tx_q->desc_ring) { + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_q->size); + err = -ENOMEM; + goto err_alloc; + } + + tx_q->next_to_alloc = 0; + tx_q->next_to_use = 0; + tx_q->next_to_clean = 0; + set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); + + return 0; + +err_alloc: + idpf_tx_desc_rel(tx_q, bufq); + + return err; +} + +/** + * idpf_tx_desc_alloc_all - allocate all queues Tx resources + * @vport: virtual port private structure + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) +{ + struct device *dev = &vport->adapter->pdev->dev; + int err = 0; + int i, j; + + /* Setup buffer queues. In single queue model buffer queues and + * completion queues will be same + */ + for (i = 0; i < vport->num_txq_grp; i++) { + for (j = 0; j < vport->txq_grps[i].num_txq; j++) { + struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; + u8 gen_bits = 0; + u16 bufidx_mask; + + err = idpf_tx_desc_alloc(txq, true); + if (err) { + dev_err(dev, "Allocation for Tx Queue %u failed\n", + i); + goto err_out; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + txq->compl_tag_cur_gen = 0; + + /* Determine the number of bits in the bufid + * mask and add one to get the start of the + * generation bits + */ + bufidx_mask = txq->desc_count - 1; + while (bufidx_mask >> 1) { + txq->compl_tag_gen_s++; + bufidx_mask = bufidx_mask >> 1; + } + txq->compl_tag_gen_s++; + + gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - + txq->compl_tag_gen_s; + txq->compl_tag_gen_max = GETMAXVAL(gen_bits); + + /* Set bufid mask based on location of first + * gen bit; it cannot simply be the descriptor + * ring size-1 since we can have size values + * where not all of those bits are set. 
+ */ + txq->compl_tag_bufid_m = + GETMAXVAL(txq->compl_tag_gen_s); + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + /* Setup completion queues */ + err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); + if (err) { + dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", + i); + goto err_out; + } + } + +err_out: + if (err) + idpf_tx_desc_rel_all(vport); + + return err; +} + +/** + * idpf_rx_page_rel - Release an rx buffer page + * @rxq: the queue that owns the buffer + * @rx_buf: the buffer to free + */ +static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) +{ + if (unlikely(!rx_buf->page)) + return; + + page_pool_put_full_page(rxq->pp, rx_buf->page, false); + + rx_buf->page = NULL; + rx_buf->page_offset = 0; +} + +/** + * idpf_rx_hdr_buf_rel_all - Release header buffer memory + * @rxq: queue to use + */ +static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) +{ + struct idpf_adapter *adapter = rxq->vport->adapter; + + dma_free_coherent(&adapter->pdev->dev, + rxq->desc_count * IDPF_HDR_BUF_SIZE, + rxq->rx_buf.hdr_buf_va, + rxq->rx_buf.hdr_buf_pa); + rxq->rx_buf.hdr_buf_va = NULL; +} + +/** + * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue + * @rxq: queue to be cleaned + */ +static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) +{ + u16 i; + + /* queue already cleared, nothing to do */ + if (!rxq->rx_buf.buf) + return; + + /* Free all the bufs allocated and given to hw on Rx queue */ + for (i = 0; i < rxq->desc_count; i++) + idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); + + if (rxq->rx_hsplit_en) + idpf_rx_hdr_buf_rel_all(rxq); + + page_pool_destroy(rxq->pp); + rxq->pp = NULL; + + kfree(rxq->rx_buf.buf); + rxq->rx_buf.buf = NULL; +} + +/** + * idpf_rx_desc_rel - Free a specific Rx q resources + * @rxq: queue to clean the resources from + * @bufq: buffer q or completion q + * @q_model: single or split q model + * + * Free a specific rx queue resources + */ +static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) +{ + if (!rxq) + return; + + if (rxq->skb) { + dev_kfree_skb_any(rxq->skb); + rxq->skb = NULL; + } + + if (bufq || !idpf_is_queue_model_split(q_model)) + idpf_rx_buf_rel_all(rxq); + + rxq->next_to_alloc = 0; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + if (!rxq->desc_ring) + return; + + dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); + rxq->desc_ring = NULL; +} + +/** + * idpf_rx_desc_rel_all - Free Rx Resources for All Queues + * @vport: virtual port structure + * + * Free all rx queues resources + */ +static void idpf_rx_desc_rel_all(struct idpf_vport *vport) +{ + struct idpf_rxq_group *rx_qgrp; + u16 num_rxq; + int i, j; + + if (!vport->rxq_grps) + return; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + + if (!idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) + idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], + false, vport->rxq_model); + continue; + } + + num_rxq = rx_qgrp->splitq.num_rxq_sets; + for (j = 0; j < num_rxq; j++) + idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, + false, vport->rxq_model); + + if (!rx_qgrp->splitq.bufq_sets) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_bufq_set *bufq_set = + &rx_qgrp->splitq.bufq_sets[j]; + + idpf_rx_desc_rel(&bufq_set->bufq, true, + vport->rxq_model); + } + } +} + +/** + * idpf_rx_buf_hw_update - Store the new tail and head values + * @rxq: queue to bump + * @val: new head index + */ +void 
idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) +{ + rxq->next_to_use = val; + + if (unlikely(!rxq->tail)) + return; + + /* writel has an implicit memory barrier */ + writel(val, rxq->tail); +} + +/** + * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers + * @rxq: ring to use + * + * Returns 0 on success, negative on failure. + */ +static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) +{ + struct idpf_adapter *adapter = rxq->vport->adapter; + + rxq->rx_buf.hdr_buf_va = + dma_alloc_coherent(&adapter->pdev->dev, + IDPF_HDR_BUF_SIZE * rxq->desc_count, + &rxq->rx_buf.hdr_buf_pa, + GFP_KERNEL); + if (!rxq->rx_buf.hdr_buf_va) + return -ENOMEM; + + return 0; +} + +/** + * idpf_rx_post_buf_refill - Post buffer id to refill queue + * @refillq: refill queue to post to + * @buf_id: buffer id to post + */ +static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) +{ + u16 nta = refillq->next_to_alloc; + + /* store the buffer ID and the SW maintained GEN bit to the refillq */ + refillq->ring[nta] = + ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) | + (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) << + IDPF_RX_BI_GEN_S); + + if (unlikely(++nta == refillq->desc_count)) { + nta = 0; + change_bit(__IDPF_Q_GEN_CHK, refillq->flags); + } + refillq->next_to_alloc = nta; +} + +/** + * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring + * @bufq: buffer queue to post to + * @buf_id: buffer id to post + * + * Returns false if buffer could not be allocated, true otherwise. + */ +static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) +{ + struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; + u16 nta = bufq->next_to_alloc; + struct idpf_rx_buf *buf; + dma_addr_t addr; + + splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); + buf = &bufq->rx_buf.buf[buf_id]; + + if (bufq->rx_hsplit_en) { + splitq_rx_desc->hdr_addr = + cpu_to_le64(bufq->rx_buf.hdr_buf_pa + + (u32)buf_id * IDPF_HDR_BUF_SIZE); + } + + addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + return false; + + splitq_rx_desc->pkt_addr = cpu_to_le64(addr); + splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); + + nta++; + if (unlikely(nta == bufq->desc_count)) + nta = 0; + bufq->next_to_alloc = nta; + + return true; +} + +/** + * idpf_rx_post_init_bufs - Post initial buffers to bufq + * @bufq: buffer queue to post working set to + * @working_set: number of buffers to put in working set + * + * Returns true if @working_set bufs were posted successfully, false otherwise. 
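+ *
+ * Note: the tail bump below is rounded down to a multiple of
+ * rx_buf_stride; this assumes HW consumes buffer descriptors in
+ * stride-sized chunks, so only complete strides are made visible to
+ * the device.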
+ */ +static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) +{ + int i; + + for (i = 0; i < working_set; i++) { + if (!idpf_rx_post_buf_desc(bufq, i)) + return false; + } + + idpf_rx_buf_hw_update(bufq, + bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); + + return true; +} + +/** + * idpf_rx_create_page_pool - Create a page pool + * @rxbufq: RX queue to create page pool for + * + * Returns &page_pool on success, casted -errno on failure + */ +static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +{ + struct page_pool_params pp = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = rxbufq->desc_count, + .nid = NUMA_NO_NODE, + .dev = rxbufq->vport->netdev->dev.parent, + .max_len = PAGE_SIZE, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + }; + + return page_pool_create(&pp); +} + +/** + * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources + * @rxbufq: queue for which the buffers are allocated; equivalent to + * rxq when operating in singleq mode + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) +{ + int err = 0; + + /* Allocate book keeping buffers */ + rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, + sizeof(struct idpf_rx_buf), GFP_KERNEL); + if (!rxbufq->rx_buf.buf) { + err = -ENOMEM; + goto rx_buf_alloc_all_out; + } + + if (rxbufq->rx_hsplit_en) { + err = idpf_rx_hdr_buf_alloc_all(rxbufq); + if (err) + goto rx_buf_alloc_all_out; + } + + /* Allocate buffers to be given to HW. */ + if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { + int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); + + if (!idpf_rx_post_init_bufs(rxbufq, working_set)) + err = -ENOMEM; + } else { + if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, + rxbufq->desc_count - 1)) + err = -ENOMEM; + } + +rx_buf_alloc_all_out: + if (err) + idpf_rx_buf_rel_all(rxbufq); + + return err; +} + +/** + * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW + * @rxbufq: RX queue to create page pool for + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) +{ + struct page_pool *pool; + + pool = idpf_rx_create_page_pool(rxbufq); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + rxbufq->pp = pool; + + return idpf_rx_buf_alloc_all(rxbufq); +} + +/** + * idpf_rx_bufs_init_all - Initialize all RX bufs + * @vport: virtual port struct + * + * Returns 0 on success, negative on failure + */ +int idpf_rx_bufs_init_all(struct idpf_vport *vport) +{ + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + + /* Allocate bufs for the rxq itself in singleq */ + if (!idpf_is_queue_model_split(vport->rxq_model)) { + int num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_rx_bufs_init(q); + if (err) + return err; + } + + continue; + } + + /* Otherwise, allocate bufs for the buffer queues */ + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + err = idpf_rx_bufs_init(q); + if (err) + return err; + } + } + + return 0; +} + +/** + * idpf_rx_desc_alloc - Allocate queue Rx resources + * @rxq: Rx queue for which the resources are setup + * @bufq: buffer or completion queue + * @q_model: single or split queue model + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_desc_alloc(struct idpf_queue *rxq, 
bool bufq, s32 q_model) +{ + struct device *dev = rxq->dev; + + if (bufq) + rxq->size = rxq->desc_count * + sizeof(struct virtchnl2_splitq_rx_buf_desc); + else + rxq->size = rxq->desc_count * + sizeof(union virtchnl2_rx_desc); + + /* Allocate descriptors and also round up to nearest 4K */ + rxq->size = ALIGN(rxq->size, 4096); + rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, + &rxq->dma, GFP_KERNEL); + if (!rxq->desc_ring) { + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rxq->size); + return -ENOMEM; + } + + rxq->next_to_alloc = 0; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + + return 0; +} + +/** + * idpf_rx_desc_alloc_all - allocate all RX queues resources + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) +{ + struct device *dev = &vport->adapter->pdev->dev; + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + u16 num_rxq; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_rx_desc_alloc(q, false, vport->rxq_model); + if (err) { + dev_err(dev, "Memory allocation for Rx Queue %u failed\n", + i); + goto err_out; + } + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + err = idpf_rx_desc_alloc(q, true, vport->rxq_model); + if (err) { + dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", + i); + goto err_out; + } + } + } + + return 0; + +err_out: + idpf_rx_desc_rel_all(vport); + + return err; +} + +/** + * idpf_txq_group_rel - Release all resources for txq groups + * @vport: vport to release txq groups on + */ +static void idpf_txq_group_rel(struct idpf_vport *vport) +{ + int i, j; + + if (!vport->txq_grps) + return; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) { + kfree(txq_grp->txqs[j]); + txq_grp->txqs[j] = NULL; + } + kfree(txq_grp->complq); + txq_grp->complq = NULL; + } + kfree(vport->txq_grps); + vport->txq_grps = NULL; +} + +/** + * idpf_rxq_sw_queue_rel - Release software queue resources + * @rx_qgrp: rx queue group with software queues + */ +static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) +{ + int i, j; + + for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { + struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; + + for (j = 0; j < bufq_set->num_refillqs; j++) { + kfree(bufq_set->refillqs[j].ring); + bufq_set->refillqs[j].ring = NULL; + } + kfree(bufq_set->refillqs); + bufq_set->refillqs = NULL; + } +} + +/** + * idpf_rxq_group_rel - Release all resources for rxq groups + * @vport: vport to release rxq groups on + */ +static void idpf_rxq_group_rel(struct idpf_vport *vport) +{ + int i; + + if (!vport->rxq_grps) + return; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + int j; + + if (idpf_is_queue_model_split(vport->rxq_model)) { + num_rxq = rx_qgrp->splitq.num_rxq_sets; + for (j = 0; j < num_rxq; j++) 
{
+ kfree(rx_qgrp->splitq.rxq_sets[j]);
+ rx_qgrp->splitq.rxq_sets[j] = NULL;
+ }
+
+ idpf_rxq_sw_queue_rel(rx_qgrp);
+ kfree(rx_qgrp->splitq.bufq_sets);
+ rx_qgrp->splitq.bufq_sets = NULL;
+ } else {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ for (j = 0; j < num_rxq; j++) {
+ kfree(rx_qgrp->singleq.rxqs[j]);
+ rx_qgrp->singleq.rxqs[j] = NULL;
+ }
+ }
+ }
+ kfree(vport->rxq_grps);
+ vport->rxq_grps = NULL;
+}
+
+/**
+ * idpf_vport_queue_grp_rel_all - Release all queue groups
+ * @vport: vport to release queue groups for
+ */
+static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+{
+ idpf_txq_group_rel(vport);
+ idpf_rxq_group_rel(vport);
+}
+
+/**
+ * idpf_vport_queues_rel - Free memory for all queues
+ * @vport: virtual port
+ *
+ * Free the memory allocated for queues associated with a vport
+ */
+void idpf_vport_queues_rel(struct idpf_vport *vport)
+{
+ idpf_tx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(vport);
+
+ kfree(vport->txqs);
+ vport->txqs = NULL;
+}
+
+/**
+ * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
+ * @vport: vport to init txqs on
+ *
+ * We get a queue index from skb->queue_mapping and we need a fast way to
+ * dereference the queue from queue groups. This allows us to quickly pull a
+ * txq based on a queue index.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+{
+ int i, j, k = 0;
+
+ vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+
+ if (!vport->txqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ vport->txqs[k] = tx_grp->txqs[j];
+ vport->txqs[k]->idx = k;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_vport_init_num_qs - Initialize number of queues
+ * @vport: vport to initialize queues
+ * @vport_msg: data to be filled into vport
+ */
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+ struct virtchnl2_create_vport *vport_msg)
+{
+ struct idpf_vport_user_config_data *config_data;
+ u16 idx = vport->idx;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ /* number of txqs and rxqs in config data will be zeros only in the
+ * driver load path and we don't update them thereafter
+ */
+ if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
+ config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+ config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+
+ /* Adjust number of buffer queues per Rx queue group. */
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ vport->num_bufqs_per_qgrp = 0;
+ vport->bufq_size[0] = IDPF_RX_BUF_2048;
+
+ return;
+ }
+
+ vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ /* Bufq[0] default buffer size is 4K
+ * Bufq[1] default buffer size is 2K
+ */
+ vport->bufq_size[0] = IDPF_RX_BUF_4096;
+ vport->bufq_size[1] = IDPF_RX_BUF_2048;
+}
+
+/**
+ * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
+ * @vport: vport to calculate descriptor counts for
+ */
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+{
+ struct idpf_vport_user_config_data *config_data;
+ int num_bufqs = vport->num_bufqs_per_qgrp;
+ u32 num_req_txq_desc, num_req_rxq_desc;
+ u16 idx = vport->idx;
+ int i;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ num_req_txq_desc = config_data->num_req_txq_desc;
+ num_req_rxq_desc = config_data->num_req_rxq_desc;
+
+ vport->complq_desc_count = 0;
+ if (num_req_txq_desc) {
+ vport->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ vport->complq_desc_count = num_req_txq_desc;
+ if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ vport->complq_desc_count =
+ IDPF_MIN_TXQ_COMPLQ_DESC;
+ }
+ } else {
+ vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->complq_desc_count =
+ IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
+ }
+
+ if (num_req_rxq_desc)
+ vport->rxq_desc_count = num_req_rxq_desc;
+ else
+ vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+
+ for (i = 0; i < num_bufqs; i++) {
+ if (!vport->bufq_desc_count[i])
+ vport->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ num_bufqs);
+ }
+}
+
+/**
+ * idpf_vport_calc_total_qs - Calculate total number of queues
+ * @adapter: private data struct
+ * @vport_idx: vport idx to retrieve vport pointer
+ * @vport_msg: message to fill with data
+ * @max_q: vport max queue info
+ *
+ * Return 0 on success, error value on failure.
+ */
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_vport_max_q *max_q)
+{
+ int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
+ int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
+ u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_config *vport_config;
+ u16 num_txq_grps, num_rxq_grps;
+ u32 num_qs;
+
+ vport_config = adapter->vport_config[vport_idx];
+ if (vport_config) {
+ num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
+ num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
+ } else {
+ int num_cpus;
+
+ /* Restrict num of queues to cpus online as a default
+ * configuration to give best performance. User can always
+ * override to a max number of queues via ethtool.
+ */
+ num_cpus = num_online_cpus();
+
+ dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
+ dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
+ dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
+ dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
+ }
+
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
+ num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
+ vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
+ IDPF_COMPLQ_PER_GROUP);
+ vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
+ IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
+ } else {
+ num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ num_qs = num_txq_grps * (num_req_tx_qs ?
num_req_tx_qs : + dflt_singleq_txqs); + vport_msg->num_tx_q = cpu_to_le16(num_qs); + vport_msg->num_tx_complq = 0; + } + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { + num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps; + vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * + IDPF_MAX_BUFQS_PER_RXQ_GRP); + vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * + IDPF_DFLT_SPLITQ_RXQ_PER_GROUP); + } else { + num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; + num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs : + dflt_singleq_rxqs); + vport_msg->num_rx_q = cpu_to_le16(num_qs); + vport_msg->num_rx_bufq = 0; + } + + return 0; +} + +/** + * idpf_vport_calc_num_q_groups - Calculate number of queue groups + * @vport: vport to calculate q groups for + */ +void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) +{ + if (idpf_is_queue_model_split(vport->txq_model)) + vport->num_txq_grp = vport->num_txq; + else + vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; + + if (idpf_is_queue_model_split(vport->rxq_model)) + vport->num_rxq_grp = vport->num_rxq; + else + vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; +} + +/** + * idpf_vport_calc_numq_per_grp - Calculate number of queues per group + * @vport: vport to calculate queues for + * @num_txq: return parameter for number of TX queues + * @num_rxq: return parameter for number of RX queues + */ +static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, + u16 *num_txq, u16 *num_rxq) +{ + if (idpf_is_queue_model_split(vport->txq_model)) + *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; + else + *num_txq = vport->num_txq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + else + *num_rxq = vport->num_rxq; +} + +/** + * idpf_rxq_set_descids - set the descids supported by this queue + * @vport: virtual port data structure + * @q: rx queue for which descids are set + * + */ +static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) +{ + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; + } else { + if (vport->base_rxd) + q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; + else + q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; + } +} + +/** + * idpf_txq_group_alloc - Allocate all txq group resources + * @vport: vport to allocate txq groups for + * @num_txq: number of txqs to allocate for each group + * + * Returns 0 on success, negative on failure + */ +static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) +{ + bool flow_sch_en; + int err, i; + + vport->txq_grps = kcalloc(vport->num_txq_grp, + sizeof(*vport->txq_grps), GFP_KERNEL); + if (!vport->txq_grps) + return -ENOMEM; + + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + struct idpf_adapter *adapter = vport->adapter; + int j; + + tx_qgrp->vport = vport; + tx_qgrp->num_txq = num_txq; + + for (j = 0; j < tx_qgrp->num_txq; j++) { + tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), + GFP_KERNEL); + if (!tx_qgrp->txqs[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + + for (j = 0; j < tx_qgrp->num_txq; j++) { + struct idpf_queue *q = tx_qgrp->txqs[j]; + + q->dev = &adapter->pdev->dev; + q->desc_count = vport->txq_desc_count; + q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); + q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); + q->vport = vport; + q->txq_grp = tx_qgrp; + 
hash_init(q->sched_buf_hash); + + if (flow_sch_en) + set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, + sizeof(*tx_qgrp->complq), + GFP_KERNEL); + if (!tx_qgrp->complq) { + err = -ENOMEM; + goto err_alloc; + } + + tx_qgrp->complq->dev = &adapter->pdev->dev; + tx_qgrp->complq->desc_count = vport->complq_desc_count; + tx_qgrp->complq->vport = vport; + tx_qgrp->complq->txq_grp = tx_qgrp; + + if (flow_sch_en) + __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); + } + + return 0; + +err_alloc: + idpf_txq_group_rel(vport); + + return err; +} + +/** + * idpf_rxq_group_alloc - Allocate all rxq group resources + * @vport: vport to allocate rxq groups for + * @num_rxq: number of rxqs to allocate for each group + * + * Returns 0 on success, negative on failure + */ +static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_queue *q; + int i, k, err = 0; + + vport->rxq_grps = kcalloc(vport->num_rxq_grp, + sizeof(struct idpf_rxq_group), GFP_KERNEL); + if (!vport->rxq_grps) + return -ENOMEM; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + int j; + + rx_qgrp->vport = vport; + if (!idpf_is_queue_model_split(vport->rxq_model)) { + rx_qgrp->singleq.num_rxq = num_rxq; + for (j = 0; j < num_rxq; j++) { + rx_qgrp->singleq.rxqs[j] = + kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), + GFP_KERNEL); + if (!rx_qgrp->singleq.rxqs[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + goto skip_splitq_rx_init; + } + rx_qgrp->splitq.num_rxq_sets = num_rxq; + + for (j = 0; j < num_rxq; j++) { + rx_qgrp->splitq.rxq_sets[j] = + kzalloc(sizeof(struct idpf_rxq_set), + GFP_KERNEL); + if (!rx_qgrp->splitq.rxq_sets[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + + rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, + sizeof(struct idpf_bufq_set), + GFP_KERNEL); + if (!rx_qgrp->splitq.bufq_sets) { + err = -ENOMEM; + goto err_alloc; + } + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_bufq_set *bufq_set = + &rx_qgrp->splitq.bufq_sets[j]; + int swq_size = sizeof(struct idpf_sw_queue); + + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->dev = &adapter->pdev->dev; + q->desc_count = vport->bufq_desc_count[j]; + q->vport = vport; + q->rxq_grp = rx_qgrp; + q->idx = j; + q->rx_buf_size = vport->bufq_size[j]; + q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; + q->rx_buf_stride = IDPF_RX_BUF_STRIDE; + if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) { + q->rx_hsplit_en = true; + q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; + } + + bufq_set->num_refillqs = num_rxq; + bufq_set->refillqs = kcalloc(num_rxq, swq_size, + GFP_KERNEL); + if (!bufq_set->refillqs) { + err = -ENOMEM; + goto err_alloc; + } + for (k = 0; k < bufq_set->num_refillqs; k++) { + struct idpf_sw_queue *refillq = + &bufq_set->refillqs[k]; + + refillq->dev = &vport->adapter->pdev->dev; + refillq->desc_count = + vport->bufq_desc_count[j]; + set_bit(__IDPF_Q_GEN_CHK, refillq->flags); + set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + refillq->ring = kcalloc(refillq->desc_count, + sizeof(u16), + GFP_KERNEL); + if (!refillq->ring) { + err = -ENOMEM; + goto err_alloc; + } + } + } + +skip_splitq_rx_init: + for (j = 0; j < num_rxq; j++) { + if (!idpf_is_queue_model_split(vport->rxq_model)) { + q = rx_qgrp->singleq.rxqs[j]; + goto setup_rxq; + } + 
q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + rx_qgrp->splitq.rxq_sets[j]->refillq0 = + &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; + if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) + rx_qgrp->splitq.rxq_sets[j]->refillq1 = + &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; + + if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) { + q->rx_hsplit_en = true; + q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; + } + +setup_rxq: + q->dev = &adapter->pdev->dev; + q->desc_count = vport->rxq_desc_count; + q->vport = vport; + q->rxq_grp = rx_qgrp; + q->idx = (i * num_rxq) + j; + /* In splitq mode, RXQ buffer size should be + * set to that of the first buffer queue + * associated with this RXQ + */ + q->rx_buf_size = vport->bufq_size[0]; + q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; + q->rx_max_pkt_size = vport->netdev->mtu + + IDPF_PACKET_HDR_PAD; + idpf_rxq_set_descids(vport, q); + } + } + +err_alloc: + if (err) + idpf_rxq_group_rel(vport); + + return err; +} + +/** + * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources + * @vport: vport with qgrps to allocate + * + * Returns 0 on success, negative on failure + */ +static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) +{ + u16 num_txq, num_rxq; + int err; + + idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); + + err = idpf_txq_group_alloc(vport, num_txq); + if (err) + goto err_out; + + err = idpf_rxq_group_alloc(vport, num_rxq); + if (err) + goto err_out; + + return 0; + +err_out: + idpf_vport_queue_grp_rel_all(vport); + + return err; +} + +/** + * idpf_vport_queues_alloc - Allocate memory for all queues + * @vport: virtual port + * + * Allocate memory for queues associated with a vport. Returns 0 on success, + * negative on failure. + */ +int idpf_vport_queues_alloc(struct idpf_vport *vport) +{ + int err; + + err = idpf_vport_queue_grp_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_tx_desc_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_rx_desc_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_vport_init_fast_path_txqs(vport); + if (err) + goto err_out; + + return 0; + +err_out: + idpf_vport_queues_rel(vport); + + return err; +} + +/** + * idpf_tx_handle_sw_marker - Handle queue marker packet + * @tx_q: tx queue to handle software marker + */ +static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) +{ + struct idpf_vport *vport = tx_q->vport; + int i; + + clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); + /* Hardware must write marker packets to all queues associated with + * completion queues. So check if all queues received marker packets + */ + for (i = 0; i < vport->num_txq; i++) + /* If we're still waiting on any other TXQ marker completions, + * just return now since we cannot wake up the marker_wq yet. 
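+ *
+ * (It is assumed here that a control-path thread is sleeping on
+ * vport->sw_marker_wq, e.g. while draining queues, and must only be
+ * woken once every TX queue has seen its marker completion.)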
+ */ + if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) + return; + + /* Drain complete */ + set_bit(IDPF_VPORT_SW_MARKER, vport->flags); + wake_up(&vport->sw_marker_wq); +} + +/** + * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of + * packet + * @tx_q: tx queue to clean buffer from + * @tx_buf: buffer to be cleaned + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @napi_budget: Used to determine if we are in netpoll + */ +static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, + struct idpf_tx_buf *tx_buf, + struct idpf_cleaned_stats *cleaned, + int napi_budget) +{ + napi_consume_skb(tx_buf->skb, napi_budget); + + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + dma_unmap_len_set(tx_buf, len, 0); + } + + /* clear tx_buf data */ + tx_buf->skb = NULL; + + cleaned->bytes += tx_buf->bytecount; + cleaned->packets += tx_buf->gso_segs; +} + +/** + * idpf_tx_clean_stashed_bufs - clean bufs that were stored for + * out of order completions + * @txq: queue to clean + * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + */ +static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + struct idpf_tx_stash *stash; + struct hlist_node *tmp_buf; + + /* Buffer completion */ + hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, + hlist, compl_tag) { + if (unlikely(stash->buf.compl_tag != (int)compl_tag)) + continue; + + if (stash->buf.skb) { + idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned, + budget); + } else if (dma_unmap_len(&stash->buf, len)) { + dma_unmap_page(txq->dev, + dma_unmap_addr(&stash->buf, dma), + dma_unmap_len(&stash->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&stash->buf, len, 0); + } + + /* Push shadow buf back onto stack */ + idpf_buf_lifo_push(&txq->buf_stack, stash); + + hash_del(&stash->hlist); + } +} + +/** + * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a + * later time (only relevant for flow scheduling mode) + * @txq: Tx queue to clean + * @tx_buf: buffer to store + */ +static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, + struct idpf_tx_buf *tx_buf) +{ + struct idpf_tx_stash *stash; + + if (unlikely(!dma_unmap_addr(tx_buf, dma) && + !dma_unmap_len(tx_buf, len))) + return 0; + + stash = idpf_buf_lifo_pop(&txq->buf_stack); + if (unlikely(!stash)) { + net_err_ratelimited("%s: No out-of-order TX buffers left!\n", + txq->vport->netdev->name); + + return -ENOMEM; + } + + /* Store buffer params in shadow buffer */ + stash->buf.skb = tx_buf->skb; + stash->buf.bytecount = tx_buf->bytecount; + stash->buf.gso_segs = tx_buf->gso_segs; + dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); + dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); + stash->buf.compl_tag = tx_buf->compl_tag; + + /* Add buffer to buf_hash table to be freed later */ + hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); + + memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); + + /* Reinitialize buf_id portion of tag */ + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + return 0; +} + +#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ +do { \ + (ntc)++; \ + if (unlikely(!(ntc))) { \ + ntc -= (txq)->desc_count; \ + buf = 
(txq)->tx_buf; \ + desc = IDPF_FLEX_TX_DESC(txq, 0); \ + } else { \ + (buf)++; \ + (desc)++; \ + } \ +} while (0) + +/** + * idpf_tx_splitq_clean - Reclaim resources from buffer queue + * @tx_q: Tx queue to clean + * @end: queue index until which it should be cleaned + * @napi_budget: Used to determine if we are in netpoll + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @descs_only: true if queue is using flow-based scheduling and should + * not clean buffers at this time + * + * Cleans the queue descriptor ring. If the queue is using queue-based + * scheduling, the buffers will be cleaned as well. If the queue is using + * flow-based scheduling, only the descriptors are cleaned at this time. + * Separate packet completion events will be reported on the completion queue, + * and the buffers will be cleaned separately. The stats are not updated from + * this function when using flow-based scheduling. + */ +static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, + int napi_budget, + struct idpf_cleaned_stats *cleaned, + bool descs_only) +{ + union idpf_tx_flex_desc *next_pending_desc = NULL; + union idpf_tx_flex_desc *tx_desc; + s16 ntc = tx_q->next_to_clean; + struct idpf_tx_buf *tx_buf; + + tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); + next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); + tx_buf = &tx_q->tx_buf[ntc]; + ntc -= tx_q->desc_count; + + while (tx_desc != next_pending_desc) { + union idpf_tx_flex_desc *eop_desc; + + /* If this entry in the ring was used as a context descriptor, + * it's corresponding entry in the buffer ring will have an + * invalid completion tag since no buffer was used. We can + * skip this descriptor since there is no buffer to clean. + */ + if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG)) + goto fetch_next_txq_desc; + + eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + if (descs_only) { + if (idpf_stash_flow_sch_buffers(tx_q, tx_buf)) + goto tx_splitq_clean_out; + + while (tx_desc != eop_desc) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf); + + if (dma_unmap_len(tx_buf, len)) { + if (idpf_stash_flow_sch_buffers(tx_q, + tx_buf)) + goto tx_splitq_clean_out; + } + } + } else { + idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned, + napi_budget); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf); + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + } + +fetch_next_txq_desc: + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); + } + +tx_splitq_clean_out: + ntc += tx_q->desc_count; + tx_q->next_to_clean = ntc; +} + +#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ +do { \ + (buf)++; \ + (ntc)++; \ + if (unlikely((ntc) == (txq)->desc_count)) { \ + buf = (txq)->tx_buf; \ + ntc = 0; \ + } \ +} while (0) + +/** + * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers + * @txq: queue to clean + * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + * + * Cleans all buffers associated with the input completion tag either from the + * TX buffer ring or from the hash table if the buffers were 
previously + * stashed. Returns the byte/segment count for the cleaned packet associated + * this completion tag. + */ +static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + u16 idx = compl_tag & txq->compl_tag_bufid_m; + struct idpf_tx_buf *tx_buf = NULL; + u16 ntc = txq->next_to_clean; + u16 num_descs_cleaned = 0; + u16 orig_idx = idx; + + tx_buf = &txq->tx_buf[idx]; + + while (tx_buf->compl_tag == (int)compl_tag) { + if (tx_buf->skb) { + idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(txq->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + num_descs_cleaned++; + idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + } + + /* If we didn't clean anything on the ring for this completion, there's + * nothing more to do. + */ + if (unlikely(!num_descs_cleaned)) + return false; + + /* Otherwise, if we did clean a packet on the ring directly, it's safe + * to assume that the descriptors starting from the original + * next_to_clean up until the previously cleaned packet can be reused. + * Therefore, we will go back in the ring and stash any buffers still + * in the ring into the hash table to be cleaned later. + */ + tx_buf = &txq->tx_buf[ntc]; + while (tx_buf != &txq->tx_buf[orig_idx]) { + idpf_stash_flow_sch_buffers(txq, tx_buf); + idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); + } + + /* Finally, update next_to_clean to reflect the work that was just done + * on the ring, if any. If the packet was only cleaned from the hash + * table, the ring will not be impacted, therefore we should not touch + * next_to_clean. The updated idx is used here + */ + txq->next_to_clean = idx; + + return true; +} + +/** + * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers + * whether on the buffer ring or in the hash table + * @txq: Tx ring to clean + * @desc: pointer to completion queue descriptor to extract completion + * information from + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + * + * Returns bytes/packets cleaned + */ +static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, + struct idpf_splitq_tx_compl_desc *desc, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + u16 compl_tag; + + if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { + u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); + + return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + } + + compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); + + /* If we didn't clean anything on the ring, this packet must be + * in the hash table. Go clean it there. + */ + if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget)) + idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget); +} + +/** + * idpf_tx_clean_complq - Reclaim resources on completion queue + * @complq: Tx ring to clean + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + */ +static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, + int *cleaned) +{ + struct idpf_splitq_tx_compl_desc *tx_desc; + struct idpf_vport *vport = complq->vport; + s16 ntc = complq->next_to_clean; + struct idpf_netdev_priv *np; + unsigned int complq_budget; + bool complq_ok = true; + int i; + + complq_budget = vport->compln_clean_budget; + tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + ntc -= complq->desc_count; + + do { + struct idpf_cleaned_stats cleaned_stats = { }; + struct idpf_queue *tx_q; + int rel_tx_qid; + u16 hw_head; + u8 ctype; /* completion type */ + u16 gen; + + /* if the descriptor isn't done, no work yet to do */ + gen = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; + if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + break; + + /* Find necessary info of TX queue to clean buffers */ + rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; + if (rel_tx_qid >= complq->txq_grp->num_txq || + !complq->txq_grp->txqs[rel_tx_qid]) { + dev_err(&complq->vport->adapter->pdev->dev, + "TxQ not found\n"); + goto fetch_next_desc; + } + tx_q = complq->txq_grp->txqs[rel_tx_qid]; + + /* Determine completion type */ + ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> + IDPF_TXD_COMPLQ_COMPL_TYPE_S; + switch (ctype) { + case IDPF_TXD_COMPLT_RE: + hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); + + idpf_tx_splitq_clean(tx_q, hw_head, budget, + &cleaned_stats, true); + break; + case IDPF_TXD_COMPLT_RS: + idpf_tx_handle_rs_completion(tx_q, tx_desc, + &cleaned_stats, budget); + break; + case IDPF_TXD_COMPLT_SW_MARKER: + idpf_tx_handle_sw_marker(tx_q); + break; + default: + dev_err(&tx_q->vport->adapter->pdev->dev, + "Unknown TX completion type: %d\n", + ctype); + goto fetch_next_desc; + } + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); + u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); + tx_q->cleaned_pkts += cleaned_stats.packets; + tx_q->cleaned_bytes += cleaned_stats.bytes; + complq->num_completions++; + u64_stats_update_end(&tx_q->stats_sync); + +fetch_next_desc: + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= complq->desc_count; + tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); + change_bit(__IDPF_Q_GEN_CHK, complq->flags); + } + + prefetch(tx_desc); + + /* update budget accounting */ + complq_budget--; + } while (likely(complq_budget)); + + /* Store the state of the complq to be used later in deciding if a + * TXQ can be started again + */ + if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) + complq_ok = false; + + np = netdev_priv(complq->vport->netdev); + for (i = 0; i < complq->txq_grp->num_txq; ++i) { + struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct netdev_queue *nq; + bool dont_wake; + + /* We didn't clean anything on this queue, move along */ + if (!tx_q->cleaned_bytes) + continue; + + *cleaned += tx_q->cleaned_pkts; + + /* Update BQL */ + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + + dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || + np->state != __IDPF_VPORT_UP || + !netif_carrier_ok(tx_q->vport->netdev); + /* Check if the TXQ needs to and can be restarted */ + __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, + IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, + dont_wake); + + /* Reset cleaned stats 
for the next time this queue is + * cleaned + */ + tx_q->cleaned_bytes = 0; + tx_q->cleaned_pkts = 0; + } + + ntc += complq->desc_count; + complq->next_to_clean = ntc; + + return !!complq_budget; +} + +/** + * idpf_tx_splitq_build_ctb - populate command tag and size for queue + * based scheduling descriptors + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + desc->q.qw1.cmd_dtype = + cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M); + desc->q.qw1.cmd_dtype |= + cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) & + IDPF_FLEX_TXD_QW1_CMD_M); + desc->q.qw1.buf_size = cpu_to_le16((u16)size); + desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); +} + +/** + * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow + * scheduling descriptors + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; + desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); + desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); +} + +/** + * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions + * @tx_q: the queue to be checked + * @size: number of descriptors we want to assure is available + * + * Returns 0 if stop is not needed + */ +int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) +{ + struct netdev_queue *nq; + + if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) + return 0; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + + return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); +} + +/** + * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions + * @tx_q: the queue to be checked + * @descs_needed: number of descriptors required for this packet + * + * Returns 0 if stop is not needed + */ +static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, + unsigned int descs_needed) +{ + if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) + goto splitq_stop; + + /* If there are too many outstanding completions expected on the + * completion queue, stop the TX queue to give the device some time to + * catch up + */ + if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) + goto splitq_stop; + + /* Also check for available book keeping buffers; if we are low, stop + * the queue to wait for more completions + */ + if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) + goto splitq_stop; + + return 0; + +splitq_stop: + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); + + return -EBUSY; +} + +/** + * idpf_tx_buf_hw_update - Store the new tail value + * @tx_q: queue to bump + * @val: new tail index + * @xmit_more: more skb's pending + * + * The naming here is special in that 'hw' signals that this function is about + * to do a register write to update our queue status. 
We know this can only + * mean tail here as HW should be owning head for TX. + */ +void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, + bool xmit_more) +{ + struct netdev_queue *nq; + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + tx_q->next_to_use = val; + + idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* notify HW of packet */ + if (netif_xmit_stopped(nq) || !xmit_more) + writel(val, tx_q->tail); +} + +/** + * idpf_tx_desc_count_required - calculate number of Tx descriptors needed + * @txq: queue to send buffer on + * @skb: send buffer + * + * Returns number of data descriptors needed for this skb. + */ +unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, + struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo; + unsigned int count = 0, i; + + count += !!skb_headlen(skb); + + if (!skb_is_nonlinear(skb)) + return count; + + shinfo = skb_shinfo(skb); + for (i = 0; i < shinfo->nr_frags; i++) { + unsigned int size; + + size = skb_frag_size(&shinfo->frags[i]); + + /* We only need to use the idpf_size_to_txd_count check if the + * fragment is going to span multiple descriptors, + * i.e. size >= 16K. + */ + if (size >= SZ_16K) + count += idpf_size_to_txd_count(size); + else + count++; + } + + if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { + if (__skb_linearize(skb)) + return 0; + + count = idpf_size_to_txd_count(skb->len); + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.linearize); + u64_stats_update_end(&txq->stats_sync); + } + + return count; +} + +/** + * idpf_tx_dma_map_error - handle TX DMA map errors + * @txq: queue to send buffer on + * @skb: send buffer + * @first: original first buffer info buffer for packet + * @idx: starting point on ring to unwind + */ +void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, + struct idpf_tx_buf *first, u16 idx) +{ + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + /* clear dma mappings for failed tx_buf map */ + for (;;) { + struct idpf_tx_buf *tx_buf; + + tx_buf = &txq->tx_buf[idx]; + idpf_tx_buf_rel(txq, tx_buf); + if (tx_buf == first) + break; + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + if (skb_is_gso(skb)) { + union idpf_tx_flex_desc *tx_desc; + + /* If we failed a DMA mapping for a TSO packet, we will have + * used one additional descriptor for a context + * descriptor. Reset that here. 
+ */ + tx_desc = IDPF_FLEX_TX_DESC(txq, idx); + memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + /* Update tail in case netdev_xmit_more was previously true */ + idpf_tx_buf_hw_update(txq, idx, false); +} + +/** + * idpf_tx_splitq_bump_ntu - adjust NTU and generation + * @txq: the tx ring to wrap + * @ntu: ring index to bump + */ +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) +{ + ntu++; + + if (ntu == txq->desc_count) { + ntu = 0; + txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); + } + + return ntu; +} + +/** + * idpf_tx_splitq_map - Build the Tx flex descriptor + * @tx_q: queue to send buffer on + * @params: pointer to splitq params struct + * @first: first buffer info buffer to use + * + * This function loops over the skb data pointed to by *first + * and gets a physical address for each memory location and programs + * it and the length into the transmit flex descriptor. + */ +static void idpf_tx_splitq_map(struct idpf_queue *tx_q, + struct idpf_tx_splitq_params *params, + struct idpf_tx_buf *first) +{ + union idpf_tx_flex_desc *tx_desc; + unsigned int data_len, size; + struct idpf_tx_buf *tx_buf; + u16 i = tx_q->next_to_use; + struct netdev_queue *nq; + struct sk_buff *skb; + skb_frag_t *frag; + u16 td_cmd = 0; + dma_addr_t dma; + + skb = first->skb; + + td_cmd = params->offload.td_cmd; + + data_len = skb->data_len; + size = skb_headlen(skb); + + tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); + + dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buf = first; + + params->compl_tag = + (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + if (dma_mapping_error(tx_q->dev, dma)) + return idpf_tx_dma_map_error(tx_q, skb, first, i); + + tx_buf->compl_tag = params->compl_tag; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + /* buf_addr is in same location for both desc types */ + tx_desc->q.buf_addr = cpu_to_le64(dma); + + /* The stack can send us fragments that are too large for a + * single descriptor i.e. frag size > 16K-1. We will need to + * split the fragment across multiple descriptors in this case. + * To adhere to HW alignment restrictions, the fragment needs + * to be split such that the first chunk ends on a 4K boundary + * and all subsequent chunks start on a 4K boundary. We still + * want to send as much data as possible though, so our + * intermediate descriptor chunk size will be 12K. + * + * For example, consider a 32K fragment mapped to DMA addr 2600. + * ------------------------------------------------------------ + * | frag_size = 32K | + * ------------------------------------------------------------ + * |2600 |16384 |28672 + * + * 3 descriptors will be used for this fragment. The HW expects + * the descriptors to contain the following: + * ------------------------------------------------------------ + * | size = 13784 | size = 12K | size = 6696 | + * | dma = 2600 | dma = 16384 | dma = 28672 | + * ------------------------------------------------------------ + * + * We need to first adjust the max_data for the first chunk so + * that it ends on a 4K boundary. By negating the value of the + * DMA address and taking only the low order bits, we're + * effectively calculating + * 4K - (DMA addr lower order bits) = + * bytes to next boundary. 
+ * + * Add that to our base aligned max_data (12K) and we have + * our first chunk size. In the example above, + * 13784 = 12K + (4096-2600) + * + * After guaranteeing the first chunk ends on a 4K boundary, we + * will give the intermediate descriptors 12K chunks and + * whatever is left to the final descriptor. This ensures that + * all descriptors used for the remaining chunks of the + * fragment start on a 4K boundary and we use as few + * descriptors as possible. + */ + max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); + while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, + max_data); + + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + i = 0; + tx_q->compl_tag_cur_gen = + IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } + + /* Since this packet has a buffer that is going to span + * multiple descriptors, it's going to leave holes in + * to the TX buffer ring. To ensure these holes do not + * cause issues in the cleaning routines, we will clear + * them of any stale data and assign them the same + * completion tag as the current packet. Then when the + * packet is being cleaned, the cleaning routines will + * simply pass over these holes and finish cleaning the + * rest of the packet. + */ + memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); + tx_q->tx_buf[i].compl_tag = params->compl_tag; + + /* Adjust the DMA offset and the remaining size of the + * fragment. On the first iteration of this loop, + * max_data will be >= 12K and <= 16K-1. On any + * subsequent iteration of this loop, max_data will + * always be 12K. + */ + dma += max_data; + size -= max_data; + + /* Reset max_data since remaining chunks will be 12K + * at most + */ + max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + /* buf_addr is in same location for both desc types */ + tx_desc->q.buf_addr = cpu_to_le64(dma); + } + + if (!data_len) + break; + + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + i = 0; + tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buf = &tx_q->tx_buf[i]; + } + + /* record SW timestamp if HW timestamp is not available */ + skb_tx_timestamp(skb); + + /* write last descriptor with RS and EOP bits */ + td_cmd |= params->eop_cmd; + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); + i = idpf_tx_splitq_bump_ntu(tx_q, i); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + tx_q->txq_grp->num_completions_pending++; + + /* record bytecount for BQL */ + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + netdev_tx_sent_queue(nq, first->bytecount); + + idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); +} + +/** + * idpf_tso - computes mss and TSO length to prepare for TSO + * @skb: pointer to skb + * @off: pointer to struct that holds offload parameters + * + * Returns error (negative) if TSO was requested but cannot be applied to the + * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise. 
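+ *
+ * Note: only TCPV4, TCPV6 and UDP_L4 GSO types are handled below; any
+ * other gso_type is rejected with -EINVAL (the stack is assumed to
+ * have already validated the skb for segmentation offload).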
+ */ +int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off) +{ + const struct skb_shared_info *shinfo; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_start; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + shinfo = skb_shinfo(skb); + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else if (ip.v6->version == 6) { + ip.v6->payload_len = 0; + } + + l4_start = skb_transport_offset(skb); + + /* remove payload length from checksum */ + paylen = skb->len - l4_start; + + switch (shinfo->gso_type & ~SKB_GSO_DODGY) { + case SKB_GSO_TCPV4: + case SKB_GSO_TCPV6: + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; + break; + case SKB_GSO_UDP_L4: + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of segmentation header */ + off->tso_hdr_len = sizeof(struct udphdr) + l4_start; + l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); + break; + default: + return -EINVAL; + } + + off->tso_len = skb->len - off->tso_hdr_len; + off->mss = shinfo->gso_size; + off->tso_segs = shinfo->gso_segs; + + off->tx_flags |= IDPF_TX_FLAGS_TSO; + + return 1; +} + +/** + * __idpf_chk_linearize - Check skb is not using too many buffers + * @skb: send buffer + * @max_bufs: maximum number of buffers + * + * For TSO we need to count the TSO header and segment payload separately. As + * such we need to check cases where we have max_bufs-1 fragments or more as we + * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1 + * for the segment payload in the first descriptor, and another max_buf-1 for + * the fragments. + */ +static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + const skb_frag_t *frag, *stale; + int nr_frags, sum; + + /* no need to check if number of frags is less than max_bufs - 1 */ + nr_frags = shinfo->nr_frags; + if (nr_frags < (max_bufs - 1)) + return false; + + /* We need to walk through the list and validate that each group + * of max_bufs-2 fragments totals at least gso_size. + */ + nr_frags -= max_bufs - 2; + frag = &shinfo->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We use + * this as the worst case scenario in which the frag ahead of us only + * provides one byte which is why we are limited to max_bufs-2 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - shinfo->gso_size; + + /* Add size of frags 0 through 4 to create our initial sum */ + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. + */ + for (stale = &shinfo->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + + sum += skb_frag_size(frag++); + + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. 
To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. + */ + if (stale_size > IDPF_TX_MAX_DESC_DATA) { + int align_pad = -(skb_frag_off(stale)) & + (IDPF_TX_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED; + stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED; + } while (stale_size > IDPF_TX_MAX_DESC_DATA); + } + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + if (!nr_frags--) + break; + + sum -= stale_size; + } + + return false; +} + +/** + * idpf_chk_linearize - Check if skb exceeds max descriptors per packet + * @skb: send buffer + * @max_bufs: maximum scatter gather buffers for single packet + * @count: number of buffers this packet needs + * + * Make sure we don't exceed maximum scatter gather buffers for a single + * packet. We have to do some special checking around the boundary (max_bufs-1) + * if TSO is on since we need count the TSO header and payload separately. + * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO + * header, 1 for segment payload, and then 7 for the fragments. + */ +bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count) +{ + if (likely(count < max_bufs)) + return false; + if (skb_is_gso(skb)) + return __idpf_chk_linearize(skb, max_bufs); + + return count > max_bufs; +} + +/** + * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring + * @txq: queue to put context descriptor on + * + * Since the TX buffer rings mimics the descriptor ring, update the tx buffer + * ring entry to reflect that this index is a context descriptor + */ +static struct idpf_flex_tx_ctx_desc * +idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) +{ + struct idpf_flex_tx_ctx_desc *desc; + int i = txq->next_to_use; + + memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); + txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + /* grab the next descriptor */ + desc = IDPF_FLEX_TX_CTX_DESC(txq, i); + txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); + + return desc; +} + +/** + * idpf_tx_drop_skb - free the SKB and bump tail if necessary + * @tx_q: queue to send buffer on + * @skb: pointer to skb + */ +netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) +{ + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.skb_drops); + u64_stats_update_end(&tx_q->stats_sync); + + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/** + * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors + * @skb: send buffer + * @tx_q: queue to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, + struct idpf_queue *tx_q) +{ + struct idpf_tx_splitq_params tx_params = { }; + struct idpf_tx_buf *first; + unsigned int count; + int tso; + + count = idpf_tx_desc_count_required(tx_q, skb); + if (unlikely(!count)) + return idpf_tx_drop_skb(tx_q, skb); + + tso = idpf_tso(skb, &tx_params.offload); + if (unlikely(tso < 0)) + return idpf_tx_drop_skb(tx_q, skb); + + /* Check for splitq specific TX resources */ + count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); + if (idpf_tx_maybe_stop_splitq(tx_q, count)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_BUSY; + } + + if (tso) { + 
/* If tso is needed, set up context desc */ + struct idpf_flex_tx_ctx_desc *ctx_desc = + idpf_tx_splitq_get_ctx_desc(tx_q); + + ctx_desc->tso.qw1.cmd_dtype = + cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | + IDPF_TX_FLEX_CTX_DESC_CMD_TSO); + ctx_desc->tso.qw0.flex_tlen = + cpu_to_le32(tx_params.offload.tso_len & + IDPF_TXD_FLEX_CTX_TLEN_M); + ctx_desc->tso.qw0.mss_rt = + cpu_to_le16(tx_params.offload.mss & + IDPF_TXD_FLEX_CTX_MSS_RT_M); + ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); + u64_stats_update_end(&tx_q->stats_sync); + } + + /* record the location of the first descriptor for this packet */ + first = &tx_q->tx_buf[tx_q->next_to_use]; + first->skb = skb; + + if (tso) { + first->gso_segs = tx_params.offload.tso_segs; + first->bytecount = skb->len + + ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len); + } else { + first->gso_segs = 1; + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + } + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; + tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; + /* Set the RE bit to catch any packets that may have not been + * stashed during RS completion cleaning. MIN_GAP is set to + * MIN_RING size to ensure it will be set at least once each + * time around the ring. + */ + if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { + tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; + tx_q->txq_grp->num_completions_pending++; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; + + } else { + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; + tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; + } + + idpf_tx_splitq_map(tx_q, &tx_params, first); + + return NETDEV_TX_OK; +} + +/** + * idpf_tx_splitq_start - Selects the right Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, + struct net_device *netdev) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_queue *tx_q; + + if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; + } + + tx_q = vport->txqs[skb_get_queue_mapping(skb)]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_OK; + } + + return idpf_tx_splitq_frame(skb, tx_q); +} + +/** + * idpf_ptype_to_htype - get a hash type + * @decoded: Decoded Rx packet type related fields + * + * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by + * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of + * Rx desc. 
+ */ +enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) +{ + if (!decoded->known) + return PKT_HASH_TYPE_NONE; + if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && + decoded->inner_prot) + return PKT_HASH_TYPE_L4; + if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && + decoded->outer_ip) + return PKT_HASH_TYPE_L3; + if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) + return PKT_HASH_TYPE_L2; + + return PKT_HASH_TYPE_NONE; +} + +/** + * idpf_rx_hash - set the hash value in the skb + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @rx_desc: Receive descriptor + * @decoded: Decoded Rx packet type related fields + */ +static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u32 hash; + + if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) + return; + + hash = le16_to_cpu(rx_desc->hash1) | + (rx_desc->ff2_mirrid_hash2.hash2 << 16) | + (rx_desc->hash3 << 24); + + skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); +} + +/** + * idpf_rx_csum - Indicate in skb if checksum is good + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @csum_bits: checksum fields extracted from the descriptor + * @decoded: Decoded Rx packet type related fields + * + * skb->protocol must be set before this function is called + */ +static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded *csum_bits, + struct idpf_rx_ptype_decoded *decoded) +{ + bool ipv4, ipv6; + + /* check if Rx checksum is enabled */ + if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) + return; + + /* check if HW has decoded the packet and checksum */ + if (!(csum_bits->l3l4p)) + return; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) + goto checksum_fail; + + if (ipv6 && csum_bits->ipv6exadd) + return; + + /* check for L4 errors and handle packets that were not able to be + * checksummed + */ + if (csum_bits->l4e) + goto checksum_fail; + + /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ + switch (decoded->inner_prot) { + case IDPF_RX_PTYPE_INNER_PROT_ICMP: + case IDPF_RX_PTYPE_INNER_PROT_TCP: + case IDPF_RX_PTYPE_INNER_PROT_UDP: + if (!csum_bits->raw_csum_inv) { + u16 csum = csum_bits->raw_csum; + + skb->csum = csum_unfold((__force __sum16)~swab16(csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + break; + case IDPF_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + break; + } + + return; + +checksum_fail: + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_update_end(&rxq->stats_sync); +} + +/** + * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor + * @rx_desc: receive descriptor + * @csum: structure to extract checksum fields + * + **/ +static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_csum_decoded *csum) +{ + u8 qword0, qword1; + + qword0 = rx_desc->status_err0_qw0; + qword1 = rx_desc->status_err0_qw1; + + csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + qword1); + csum->eipe = 
FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, + qword1); + csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, + qword1); + csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, + qword1); + csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, + qword0); + csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M, + le16_to_cpu(rx_desc->ptype_err_fflags0)); + csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); +} + +/** + * idpf_rx_rsc - Set the RSC fields in the skb + * @rxq : Rx descriptor ring packet is being transacted on + * @skb : pointer to current skb being populated + * @rx_desc: Receive descriptor + * @decoded: Decoded Rx packet type related fields + * + * Return 0 on success and error code on failure + * + * Populate the skb fields with the total number of RSC segments, RSC payload + * length and packet type. + */ +static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u16 rsc_segments, rsc_seg_len; + bool ipv4, ipv6; + int len; + + if (unlikely(!decoded->outer_ip)) + return -EINVAL; + + rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); + if (unlikely(!rsc_seg_len)) + return -EINVAL; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + if (unlikely(!(ipv4 ^ ipv6))) + return -EINVAL; + + rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); + if (unlikely(rsc_segments == 1)) + return 0; + + NAPI_GRO_CB(skb)->count = rsc_segments; + skb_shinfo(skb)->gso_size = rsc_seg_len; + + skb_reset_network_header(skb); + len = skb->len - skb_transport_offset(skb); + + if (ipv4) { + struct iphdr *ipv4h = ip_hdr(skb); + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + /* Reset and set transport header offset in skb */ + skb_set_transport_header(skb, sizeof(struct iphdr)); + + /* Compute the TCP pseudo header checksum*/ + tcp_hdr(skb)->check = + ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); + } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + tcp_hdr(skb)->check = + ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); + } + + tcp_gro_complete(skb); + + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); + u64_stats_update_end(&rxq->stats_sync); + + return 0; +} + +/** + * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @rx_desc: Receive descriptor + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, protocol, and + * other fields within the skb. + */ +static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, + struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +{ + struct idpf_rx_csum_decoded csum_bits = { }; + struct idpf_rx_ptype_decoded decoded; + u16 rx_ptype; + + rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M, + le16_to_cpu(rx_desc->ptype_err_fflags0)); + + decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; + /* If we don't know the ptype we can't do anything else with it. Just + * pass it up the stack as-is. 
+ */ + if (!decoded.known) + return 0; + + /* process RSS/hash */ + idpf_rx_hash(rxq, skb, rx_desc, &decoded); + + skb->protocol = eth_type_trans(skb, rxq->vport->netdev); + + if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M, + le16_to_cpu(rx_desc->hdrlen_flags))) + return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); + + idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); + idpf_rx_csum(rxq, skb, &csum_bits, &decoded); + + return 0; +} + +/** + * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_buf: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buf->page to the skb. + * It will just attach the page as a frag to the skb. + * The function will then update the page offset. + */ +void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size) +{ + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->page_offset, size, rx_buf->truesize); + + rx_buf->page = NULL; +} + +/** + * idpf_rx_construct_skb - Allocate skb and populate it + * @rxq: Rx descriptor queue + * @rx_buf: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. + */ +struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, + struct idpf_rx_buf *rx_buf, + unsigned int size) +{ + unsigned int headlen; + struct sk_buff *skb; + void *va; + + va = page_address(rx_buf->page) + rx_buf->page_offset; + + /* prefetch first cache line of first page */ + net_prefetch(va); + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, + GFP_ATOMIC); + if (unlikely(!skb)) { + idpf_rx_put_page(rx_buf); + + return NULL; + } + + skb_record_rx_queue(skb, rxq->idx); + skb_mark_for_recycle(skb); + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IDPF_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* if we exhaust the linear part then add what is left as a frag */ + size -= headlen; + if (!size) { + idpf_rx_put_page(rx_buf); + + return skb; + } + + skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, + size, rx_buf->truesize); + + /* Since we're giving the page to the stack, clear our reference to it. + * We'll get a new one during buffer posting. + */ + rx_buf->page = NULL; + + return skb; +} + +/** + * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer + * @rxq: Rx descriptor queue + * @va: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page data from + * the current receive descriptor, taking care to set up the skb correctly. + * This specifically uses a header buffer to start building the skb. 
+ */ +static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, + const void *va, + unsigned int size) +{ + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); + if (unlikely(!skb)) + return NULL; + + skb_record_rx_queue(skb, rxq->idx); + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* More than likely, a payload fragment, which will use a page from + * page_pool will be added to the SKB so mark it for recycle + * preemptively. And if not, it's inconsequential. + */ + skb_mark_for_recycle(skb); + + return skb; +} + +/** + * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor + * status and error fields + * @stat_err_field: field from descriptor to test bits in + * @stat_err_bits: value to mask + * + */ +static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field, + const u8 stat_err_bits) +{ + return !!(stat_err_field & stat_err_bits); +} + +/** + * idpf_rx_splitq_is_eop - process handling of EOP buffers + * @rx_desc: Rx descriptor for current buffer + * + * If the buffer is an EOP buffer, this function exits returning true, + * otherwise return false indicating that this is in fact a non-EOP buffer. + */ +static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +{ + /* if we are the last buffer then there is nothing else to do */ + return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, + IDPF_RXD_EOF_SPLITQ)); +} + +/** + * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue + * @rxq: Rx descriptor queue to retrieve receive buffer queue + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. 
+ * + * Returns amount of work completed + */ +static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) +{ + int total_rx_bytes = 0, total_rx_pkts = 0; + struct idpf_queue *rx_bufq = NULL; + struct sk_buff *skb = rxq->skb; + u16 ntc = rxq->next_to_clean; + + /* Process Rx packets bounded by budget */ + while (likely(total_rx_pkts < budget)) { + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + struct idpf_sw_queue *refillq = NULL; + struct idpf_rxq_set *rxq_set = NULL; + struct idpf_rx_buf *rx_buf = NULL; + union virtchnl2_rx_desc *desc; + unsigned int pkt_len = 0; + unsigned int hdr_len = 0; + u16 gen_id, buf_id = 0; + /* Header buffer overflow only valid for header split */ + bool hbo = false; + int bufq_id; + u8 rxdid; + + /* get the Rx desc from Rx queue based on 'next_to_clean' */ + desc = IDPF_RX_DESC(rxq, ntc); + rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc + */ + dma_rmb(); + + /* if the descriptor isn't done, no work yet to do */ + gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id); + + if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) + break; + + rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, + rx_desc->rxdid_ucast); + if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { + IDPF_RX_BUMP_NTC(rxq, ntc); + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.bad_descs); + u64_stats_update_end(&rxq->stats_sync); + continue; + } + + pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M, + pkt_len); + + hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, + rx_desc->status_err0_qw1); + + if (unlikely(hbo)) { + /* If a header buffer overflow occurs, i.e. header is + * too large to fit in the header split buffer, HW will + * put the entire packet, including headers, in the + * data/payload buffer.
+ */ + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); + u64_stats_update_end(&rxq->stats_sync); + goto bypass_hsplit; + } + + hdr_len = le16_to_cpu(rx_desc->hdrlen_flags); + hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M, + hdr_len); + +bypass_hsplit: + bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M, + bufq_id); + + rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); + if (!bufq_id) + refillq = rxq_set->refillq0; + else + refillq = rxq_set->refillq1; + + /* retrieve buffer from the rxq */ + rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; + + buf_id = le16_to_cpu(rx_desc->buf_id); + + rx_buf = &rx_bufq->rx_buf.buf[buf_id]; + + if (hdr_len) { + const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + + (u32)buf_id * IDPF_HDR_BUF_SIZE; + + skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); + u64_stats_update_end(&rxq->stats_sync); + } + + if (pkt_len) { + idpf_rx_sync_for_cpu(rx_buf, pkt_len); + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_construct_skb(rxq, rx_buf, + pkt_len); + } else { + idpf_rx_put_page(rx_buf); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + idpf_rx_post_buf_refill(refillq, buf_id); + + IDPF_RX_BUMP_NTC(rxq, ntc); + /* skip if it is non EOP desc */ + if (!idpf_rx_splitq_is_eop(rx_desc)) + continue; + + /* pad skb if needed (to make valid ethernet frame) */ + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* protocol */ + if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) { + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + /* send completed skb up the stack */ + napi_gro_receive(&rxq->q_vector->napi, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_pkts++; + } + + rxq->next_to_clean = ntc; + + rxq->skb = skb; + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); + u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); + u64_stats_update_end(&rxq->stats_sync); + + /* guarantee a trip back through this routine if there was a failure */ + return total_rx_pkts; +} + +/** + * idpf_rx_update_bufq_desc - Update buffer queue descriptor + * @bufq: Pointer to the buffer queue + * @refill_desc: SW Refill queue descriptor containing buffer ID + * @buf_desc: Buffer queue descriptor + * + * Return 0 on success and negative on failure. 
+ */ +static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, + struct virtchnl2_splitq_rx_buf_desc *buf_desc) +{ + struct idpf_rx_buf *buf; + dma_addr_t addr; + u16 buf_id; + + buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + + buf = &bufq->rx_buf.buf[buf_id]; + + addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + return -ENOMEM; + + buf_desc->pkt_addr = cpu_to_le64(addr); + buf_desc->qword0.buf_id = cpu_to_le16(buf_id); + + if (!bufq->rx_hsplit_en) + return 0; + + buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + + (u32)buf_id * IDPF_HDR_BUF_SIZE); + + return 0; +} + +/** + * idpf_rx_clean_refillq - Clean refill queue buffers + * @bufq: buffer queue to post buffers back to + * @refillq: refill queue to clean + * + * This function takes care of the buffer refill management + */ +static void idpf_rx_clean_refillq(struct idpf_queue *bufq, + struct idpf_sw_queue *refillq) +{ + struct virtchnl2_splitq_rx_buf_desc *buf_desc; + u16 bufq_nta = bufq->next_to_alloc; + u16 ntc = refillq->next_to_clean; + int cleaned = 0; + u16 gen; + + buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); + + /* make sure we stop at ring wrap in the unlikely case ring is full */ + while (likely(cleaned < refillq->desc_count)) { + u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); + bool failure; + + gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); + if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) + break; + + failure = idpf_rx_update_bufq_desc(bufq, refill_desc, + buf_desc); + if (failure) + break; + + if (unlikely(++ntc == refillq->desc_count)) { + change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + ntc = 0; + } + + if (unlikely(++bufq_nta == bufq->desc_count)) { + buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); + bufq_nta = 0; + } else { + buf_desc++; + } + + cleaned++; + } + + if (!cleaned) + return; + + /* We want to limit how many transactions on the bus we trigger with + * tail writes so we only do it in strides. It's also important we + * align the write to a multiple of 8 as required by HW. + */ + if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + + bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) + idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta, + IDPF_RX_BUF_POST_STRIDE)); + + /* update next to alloc since we have filled the ring */ + refillq->next_to_clean = ntc; + bufq->next_to_alloc = bufq_nta; +} + +/** + * idpf_rx_clean_refillq_all - Clean all refill queues + * @bufq: buffer queue with refill queues + * + * Iterates through all refill queues assigned to the buffer queue assigned to + * this vector. Returns true if clean is complete within budget, false + * otherwise. 
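The wrap-aware gate used in idpf_rx_clean_refillq() above, which decides when to bump the buffer queue tail, can be sketched on its own; the ring size and index values below are made up, and POST_STRIDE mirrors IDPF_RX_BUF_POST_STRIDE (16).

#include <stdio.h>

#define DESC_COUNT 512u         /* illustrative buffer queue size */
#define POST_STRIDE 16u         /* mirrors IDPF_RX_BUF_POST_STRIDE */
#define ALIGN_DOWN(x, a) ((x) - ((x) % (a)))

/* descriptors filled since the last tail write, accounting for ring wrap */
static unsigned int filled_since_tail(unsigned int next_to_use, unsigned int next_to_alloc)
{
	return (next_to_use <= next_to_alloc ? 0 : DESC_COUNT) + next_to_alloc - next_to_use;
}

int main(void)
{
	unsigned int ntu = 500, nta = 10;	/* refill has wrapped past the end of the ring */
	unsigned int filled = filled_since_tail(ntu, nta);

	/* only write tail in strides, aligned down to a multiple of the stride */
	if (filled >= POST_STRIDE)
		printf("write tail at %u (%u newly filled)\n", ALIGN_DOWN(nta, POST_STRIDE), filled);

	return 0;
}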
+ */ +static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) +{ + struct idpf_bufq_set *bufq_set; + int i; + + bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); + for (i = 0; i < bufq_set->num_refillqs; i++) + idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); +} + +/** + * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + * + */ +static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq, + void *data) +{ + struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data; + + q_vector->total_events++; + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport + * @vport: virtual port structure + * + */ +static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport) +{ + u16 v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) + netif_napi_del(&vport->q_vectors[v_idx].napi); +} + +/** + * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport + * @vport: main vport structure + */ +static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) +{ + int v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) + napi_disable(&vport->q_vectors[v_idx].napi); +} + +/** + * idpf_vport_intr_rel - Free memory allocated for interrupt vectors + * @vport: virtual port + * + * Free the memory allocated for interrupt vectors associated to a vport + */ +void idpf_vport_intr_rel(struct idpf_vport *vport) +{ + int i, j, v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + + kfree(q_vector->bufq); + q_vector->bufq = NULL; + kfree(q_vector->tx); + q_vector->tx = NULL; + kfree(q_vector->rx); + q_vector->rx = NULL; + } + + /* Clean up the mapping of queues to vectors */ + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) + for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) + rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; + else + for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) + rx_qgrp->singleq.rxqs[j]->q_vector = NULL; + } + + if (idpf_is_queue_model_split(vport->txq_model)) + for (i = 0; i < vport->num_txq_grp; i++) + vport->txq_grps[i].complq->q_vector = NULL; + else + for (i = 0; i < vport->num_txq_grp; i++) + for (j = 0; j < vport->txq_grps[i].num_txq; j++) + vport->txq_grps[i].txqs[j]->q_vector = NULL; + + kfree(vport->q_vectors); + vport->q_vectors = NULL; +} + +/** + * idpf_vport_intr_rel_irq - Free the IRQ association with the OS + * @vport: main vport structure + */ +static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int vector; + + for (vector = 0; vector < vport->num_q_vectors; vector++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + int irq_num, vidx; + + /* free only the irqs that were actually requested */ + if (!q_vector) + continue; + + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, q_vector); + } +} + +/** + * idpf_vport_intr_dis_irq_all - Disable all interrupt + * @vport: main vport structure + */ +static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) +{ + struct idpf_q_vector *q_vector = vport->q_vectors; + int q_idx; + + for (q_idx 
= 0; q_idx < vport->num_q_vectors; q_idx++) + writel(0, q_vector[q_idx].intr_reg.dyn_ctl); +} + +/** + * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings + * @q_vector: pointer to q_vector + * @type: itr index + * @itr: itr value + */ +static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, + const int type, u16 itr) +{ + u32 itr_val; + + itr &= IDPF_ITR_MASK; + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ + itr_val = q_vector->intr_reg.dyn_ctl_intena_m | + (type << q_vector->intr_reg.dyn_ctl_itridx_s) | + (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); + + return itr_val; +} + +/** + * idpf_update_dim_sample - Update dim sample with packets and bytes + * @q_vector: the vector associated with the interrupt + * @dim_sample: dim sample to update + * @dim: dim instance structure + * @packets: total packets + * @bytes: total bytes + * + * Update the dim sample with the packets and bytes which are passed to this + * function. Set the dim state appropriately if the dim settings gets stale. + */ +static void idpf_update_dim_sample(struct idpf_q_vector *q_vector, + struct dim_sample *dim_sample, + struct dim *dim, u64 packets, u64 bytes) +{ + dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); + dim_sample->comp_ctr = 0; + + /* if dim settings get stale, like when not updated for 1 second or + * longer, force it to start again. This addresses the frequent case + * of an idle queue being switched to by the scheduler. + */ + if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_net_dim - Update net DIM algorithm + * @q_vector: the vector associated with the interrupt + * + * Create a DIM sample and notify net_dim() so that it can possibly decide + * a new ITR value based on incoming packets, bytes, and interrupts. + * + * This function is a no-op if the queue is not configured to dynamic ITR. 
+ */ +static void idpf_net_dim(struct idpf_q_vector *q_vector) +{ + struct dim_sample dim_sample = { }; + u64 packets, bytes; + u32 i; + + if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) + goto check_rx_itr; + + for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { + struct idpf_queue *txq = q_vector->tx[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&txq->stats_sync); + packets += u64_stats_read(&txq->q_stats.tx.packets); + bytes += u64_stats_read(&txq->q_stats.tx.bytes); + } while (u64_stats_fetch_retry(&txq->stats_sync, start)); + } + + idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, + packets, bytes); + net_dim(&q_vector->tx_dim, dim_sample); + +check_rx_itr: + if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) + return; + + for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { + struct idpf_queue *rxq = q_vector->rx[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&rxq->stats_sync); + packets += u64_stats_read(&rxq->q_stats.rx.packets); + bytes += u64_stats_read(&rxq->q_stats.rx.bytes); + } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); + } + + idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, + packets, bytes); + net_dim(&q_vector->rx_dim, dim_sample); +} + +/** + * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + * Update the net_dim() algorithm and re-enable the interrupt associated with + * this vector. + */ +void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) +{ + u32 intval; + + /* net_dim() updates ITR out-of-band using a work item */ + idpf_net_dim(q_vector); + + intval = idpf_vport_intr_buildreg_itr(q_vector, + IDPF_NO_ITR_UPDATE_IDX, 0); + + writel(intval, q_vector->intr_reg.dyn_ctl); +} + +/** + * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport + * @vport: main vport structure + * @basename: name for the vector + */ +static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) +{ + struct idpf_adapter *adapter = vport->adapter; + int vector, err, irq_num, vidx; + const char *vec_name; + + for (vector = 0; vector < vport->num_q_vectors; vector++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + + if (q_vector->num_rxq && q_vector->num_txq) + vec_name = "TxRx"; + else if (q_vector->num_rxq) + vec_name = "Rx"; + else if (q_vector->num_txq) + vec_name = "Tx"; + else + continue; + + q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", + basename, vec_name, vidx); + + err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, + q_vector->name, q_vector); + if (err) { + netdev_err(vport->netdev, + "Request_irq failed, error: %d\n", err); + goto free_q_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + } + + return 0; + +free_q_irqs: + while (--vector >= 0) { + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + free_irq(irq_num, &vport->q_vectors[vector]); + } + + return err; +} + +/** + * idpf_vport_intr_write_itr - Write ITR value to the ITR register + * @q_vector: q_vector structure + * @itr: Interrupt throttling rate + * @tx: Tx or Rx ITR + */ +void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx) +{ + struct idpf_intr_reg *intr_reg; + + if (tx && !q_vector->tx) + return; + else if (!tx 
&& !q_vector->rx) + return; + + intr_reg = &q_vector->intr_reg; + writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S, + tx ? intr_reg->tx_itr : intr_reg->rx_itr); +} + +/** + * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport + * @vport: main vport structure + */ +static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) +{ + bool dynamic; + int q_idx; + u16 itr; + + for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { + struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; + + /* Set the initial ITR values */ + if (qv->num_txq) { + dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); + itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; + idpf_vport_intr_write_itr(qv, dynamic ? + itr : qv->tx_itr_value, + true); + } + + if (qv->num_rxq) { + dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); + itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; + idpf_vport_intr_write_itr(qv, dynamic ? + itr : qv->rx_itr_value, + false); + } + + if (qv->num_txq || qv->num_rxq) + idpf_vport_intr_update_itr_ena_irq(qv); + } +} + +/** + * idpf_vport_intr_deinit - Release all vector associations for the vport + * @vport: main vport structure + */ +void idpf_vport_intr_deinit(struct idpf_vport *vport) +{ + idpf_vport_intr_napi_dis_all(vport); + idpf_vport_intr_napi_del_all(vport); + idpf_vport_intr_dis_irq_all(vport); + idpf_vport_intr_rel_irq(vport); +} + +/** + * idpf_tx_dim_work - Call back from the stack + * @work: work queue structure + */ +static void idpf_tx_dim_work(struct work_struct *work) +{ + struct idpf_q_vector *q_vector; + struct idpf_vport *vport; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + q_vector = container_of(dim, struct idpf_q_vector, tx_dim); + vport = q_vector->vport; + + if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) + dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; + + /* look up the values in our local table */ + itr = vport->tx_itr_profile[dim->profile_ix]; + + idpf_vport_intr_write_itr(q_vector, itr, true); + + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_rx_dim_work - Call back from the stack + * @work: work queue structure + */ +static void idpf_rx_dim_work(struct work_struct *work) +{ + struct idpf_q_vector *q_vector; + struct idpf_vport *vport; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + q_vector = container_of(dim, struct idpf_q_vector, rx_dim); + vport = q_vector->vport; + + if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) + dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; + + /* look up the values in our local table */ + itr = vport->rx_itr_profile[dim->profile_ix]; + + idpf_vport_intr_write_itr(q_vector, itr, false); + + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_init_dim - Set up dynamic interrupt moderation + * @qv: q_vector structure + */ +static void idpf_init_dim(struct idpf_q_vector *qv) +{ + INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work); + qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; + + INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work); + qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; +} + +/** + * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport + * @vport: main vport structure + */ +static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) +{ + int q_idx; + + for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; + + 
idpf_init_dim(q_vector); + napi_enable(&q_vector->napi); + } +} + +/** + * idpf_tx_splitq_clean_all- Clean completion queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, + int budget, int *cleaned) +{ + u16 num_txq = q_vec->num_txq; + bool clean_complete = true; + int i, budget_per_q; + + if (unlikely(!num_txq)) + return true; + + budget_per_q = DIV_ROUND_UP(budget, num_txq); + for (i = 0; i < num_txq; i++) + clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], + budget_per_q, cleaned); + + return clean_complete; +} + +/** + * idpf_rx_splitq_clean_all- Clean completion queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_rxq = q_vec->num_rxq; + bool clean_complete = true; + int pkts_cleaned = 0; + int i, budget_per_q; + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. + */ + budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; + for (i = 0; i < num_rxq; i++) { + struct idpf_queue *rxq = q_vec->rx[i]; + int pkts_cleaned_per_q; + + pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); + /* if we clean as many as budgeted, we must not be done */ + if (pkts_cleaned_per_q >= budget_per_q) + clean_complete = false; + pkts_cleaned += pkts_cleaned_per_q; + } + *cleaned = pkts_cleaned; + + for (i = 0; i < q_vec->num_bufq; i++) + idpf_rx_clean_refillq_all(q_vec->bufq[i]); + + return clean_complete; +} + +/** + * idpf_vport_splitq_napi_poll - NAPI handler + * @napi: struct from which you get q_vector + * @budget: budget provided by stack + */ +static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) +{ + struct idpf_q_vector *q_vector = + container_of(napi, struct idpf_q_vector, napi); + bool clean_complete; + int work_done = 0; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (unlikely(!budget)) { + idpf_tx_splitq_clean_all(q_vector, budget, &work_done); + + return 0; + } + + clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done); + clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done); + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) + return budget; + + work_done = min_t(int, work_done, budget - 1); + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + idpf_vport_intr_update_itr_ena_irq(q_vector); + + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that + */ + if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE, + q_vector->tx[0]->flags))) + return budget; + else + return work_done; +} + +/** + * idpf_vport_intr_map_vector_to_qs - Map vectors to queues + * @vport: virtual port + * + * Mapping for vectors to queues + */ +static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) +{ + u16 num_txq_grp = vport->num_txq_grp; + int i, j, qv_idx, bufq_vidx = 0; + struct idpf_rxq_group 
*rx_qgrp; + struct idpf_txq_group *tx_qgrp; + struct idpf_queue *q, *bufq; + u16 q_index; + + for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { + u16 num_rxq; + + rx_qgrp = &vport->rxq_grps[i]; + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_rxq; + q->q_vector->rx[q_index] = q; + q->q_vector->num_rxq++; + qv_idx++; + } + + if (idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; + bufq->q_vector = &vport->q_vectors[bufq_vidx]; + q_index = bufq->q_vector->num_bufq; + bufq->q_vector->bufq[q_index] = bufq; + bufq->q_vector->num_bufq++; + } + if (++bufq_vidx >= vport->num_q_vectors) + bufq_vidx = 0; + } + } + + for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { + u16 num_txq; + + tx_qgrp = &vport->txq_grps[i]; + num_txq = tx_qgrp->num_txq; + + if (idpf_is_queue_model_split(vport->txq_model)) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + q = tx_qgrp->complq; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + qv_idx++; + } else { + for (j = 0; j < num_txq; j++) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + q = tx_qgrp->txqs[j]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + + qv_idx++; + } + } + } +} + +/** + * idpf_vport_intr_init_vec_idx - Initialize the vector indexes + * @vport: virtual port + * + * Initialize vector indexes with values returned over mailbox + */ +static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_alloc_vectors *ac; + u16 *vecids, total_vecs; + int i; + + ac = adapter->req_vec_chunks; + if (!ac) { + for (i = 0; i < vport->num_q_vectors; i++) + vport->q_vectors[i].v_idx = vport->q_vector_idxs[i]; + + return 0; + } + + total_vecs = idpf_get_reserved_vecs(adapter); + vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + if (!vecids) + return -ENOMEM; + + idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks); + + for (i = 0; i < vport->num_q_vectors; i++) + vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]]; + + kfree(vecids); + + return 0; +} + +/** + * idpf_vport_intr_napi_add_all- Register napi handler for all qvectors + * @vport: virtual port structure + */ +static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) +{ + int (*napi_poll)(struct napi_struct *napi, int budget); + u16 v_idx; + + if (idpf_is_queue_model_split(vport->txq_model)) + napi_poll = idpf_vport_splitq_napi_poll; + else + napi_poll = idpf_vport_singleq_napi_poll; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + + netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); + + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + } +} + +/** + * idpf_vport_intr_alloc - Allocate memory for interrupt vectors + * @vport: virtual port + * + * We allocate one q_vector per queue interrupt.
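For reference, a minimal sketch of the round-robin assignment idpf_vport_intr_map_vector_to_qs() performs above; the queue and vector counts here are purely illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int num_rxq = 8, num_q_vectors = 3;	/* illustrative only */
	unsigned int qv_idx = 0, q;

	/* each queue takes the next vector and wraps back to vector 0,
	 * so queues 0..7 land on vectors 0,1,2,0,1,2,0,1
	 */
	for (q = 0; q < num_rxq; q++) {
		if (qv_idx >= num_q_vectors)
			qv_idx = 0;
		printf("rxq %u -> q_vector %u\n", q, qv_idx);
		qv_idx++;
	}

	return 0;
}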
If allocation fails we + * return -ENOMEM. + */ +int idpf_vport_intr_alloc(struct idpf_vport *vport) +{ + u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; + struct idpf_q_vector *q_vector; + int v_idx, err; + + vport->q_vectors = kcalloc(vport->num_q_vectors, + sizeof(struct idpf_q_vector), GFP_KERNEL); + if (!vport->q_vectors) + return -ENOMEM; + + txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors); + rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors); + bufqs_per_vector = vport->num_bufqs_per_qgrp * + DIV_ROUND_UP(vport->num_rxq_grp, + vport->num_q_vectors); + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + q_vector = &vport->q_vectors[v_idx]; + q_vector->vport = vport; + + q_vector->tx_itr_value = IDPF_ITR_TX_DEF; + q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; + + q_vector->rx_itr_value = IDPF_ITR_RX_DEF; + q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; + + q_vector->tx = kcalloc(txqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->tx) { + err = -ENOMEM; + goto error; + } + + q_vector->rx = kcalloc(rxqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->rx) { + err = -ENOMEM; + goto error; + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + q_vector->bufq = kcalloc(bufqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->bufq) { + err = -ENOMEM; + goto error; + } + } + + return 0; + +error: + idpf_vport_intr_rel(vport); + + return err; +} + +/** + * idpf_vport_intr_init - Setup all vectors for the given vport + * @vport: virtual port + * + * Returns 0 on success or negative on failure + */ +int idpf_vport_intr_init(struct idpf_vport *vport) +{ + char *int_name; + int err; + + err = idpf_vport_intr_init_vec_idx(vport); + if (err) + return err; + + idpf_vport_intr_map_vector_to_qs(vport); + idpf_vport_intr_napi_add_all(vport); + idpf_vport_intr_napi_ena_all(vport); + + err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); + if (err) + goto unroll_vectors_alloc; + + int_name = kasprintf(GFP_KERNEL, "%s-%s", + dev_driver_string(&vport->adapter->pdev->dev), + vport->netdev->name); + + err = idpf_vport_intr_req_irq(vport, int_name); + if (err) + goto unroll_vectors_alloc; + + idpf_vport_intr_ena_irq_all(vport); + + return 0; + +unroll_vectors_alloc: + idpf_vport_intr_napi_dis_all(vport); + idpf_vport_intr_napi_del_all(vport); + + return err; +} + +/** + * idpf_config_rss - Send virtchnl messages to configure RSS + * @vport: virtual port + * + * Return 0 on success, negative on failure + */ +int idpf_config_rss(struct idpf_vport *vport) +{ + int err; + + err = idpf_send_get_set_rss_key_msg(vport, false); + if (err) + return err; + + return idpf_send_get_set_rss_lut_msg(vport, false); +} + +/** + * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values + * @vport: virtual port structure + */ +static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + u16 num_active_rxq = vport->num_rxq; + struct idpf_rss_data *rss_data; + int i; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + + for (i = 0; i < rss_data->rss_lut_size; i++) { + rss_data->rss_lut[i] = i % num_active_rxq; + rss_data->cached_lut[i] = rss_data->rss_lut[i]; + } +} + +/** + * idpf_init_rss - Allocate and initialize RSS resources + * @vport: virtual port + * + * Return 0 on success, negative on 
failure + */ +int idpf_init_rss(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_rss_data *rss_data; + u32 lut_size; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + + lut_size = rss_data->rss_lut_size * sizeof(u32); + rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL); + if (!rss_data->rss_lut) + return -ENOMEM; + + rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL); + if (!rss_data->cached_lut) { + kfree(rss_data->rss_lut); + rss_data->rss_lut = NULL; + + return -ENOMEM; + } + + /* Fill the default RSS lut values */ + idpf_fill_dflt_rss_lut(vport); + + return idpf_config_rss(vport); +} + +/** + * idpf_deinit_rss - Release RSS resources + * @vport: virtual port + */ +void idpf_deinit_rss(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_rss_data *rss_data; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + kfree(rss_data->cached_lut); + rss_data->cached_lut = NULL; + kfree(rss_data->rss_lut); + rss_data->rss_lut = NULL; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h new file mode 100644 index 0000000000..df76493faa --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -0,0 +1,1023 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_TXRX_H_ +#define _IDPF_TXRX_H_ + +#include +#include +#include + +#define IDPF_LARGE_MAX_Q 256 +#define IDPF_MAX_Q 16 +#define IDPF_MIN_Q 2 +/* Mailbox Queue */ +#define IDPF_MAX_MBXQ 1 + +#define IDPF_MIN_TXQ_DESC 64 +#define IDPF_MIN_RXQ_DESC 64 +#define IDPF_MIN_TXQ_COMPLQ_DESC 256 +#define IDPF_MAX_QIDS 256 + +/* Number of descriptors in a queue should be a multiple of 32. RX queue + * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE + * to achieve BufQ descriptors aligned to 32 + */ +#define IDPF_REQ_DESC_MULTIPLE 32 +#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32) +#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6) +#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2) + +#define IDPF_MAX_DESCS 8160 +#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE) +#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE) +#define MIN_SUPPORT_TXDID (\ + VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\ + VIRTCHNL2_TXDID_FLEX_TSO_CTX) + +#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1 +#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1 +#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4 +#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4 + +#define IDPF_COMPLQ_PER_GROUP 1 +#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1 +#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2 +#define IDPF_BUFQ2_ENA 1 +#define IDPF_NUMQ_PER_CHUNK 1 + +#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1 +#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1 + +/* Default vector sharing */ +#define IDPF_MBX_Q_VEC 1 +#define IDPF_MIN_Q_VEC 1 + +#define IDPF_DFLT_TX_Q_DESC_COUNT 512 +#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512 +#define IDPF_DFLT_RX_Q_DESC_COUNT 512 + +/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a + * given RX completion queue has descriptors. This includes _ALL_ buffer + * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers, + * you have a total of 1024 buffers so your RX queue _must_ have at least that + * many descriptors. 
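Reusing the numbers from the comment above (two buffer queues and a 1024-descriptor completion ring), the per-buffer-queue limit follows directly from the IDPF_RX_BUFQ_DESC_COUNT macro defined just below; the snippet only replays that division.

#include <stdio.h>

#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))

int main(void)
{
	unsigned int rxd = 1024, num_bufq = 2;	/* example from the comment */

	/* each buffer queue may then hold at most 512 buffers, so the total
	 * number of posted buffers can never exceed the completion ring
	 */
	printf("descriptors per buffer queue: %u\n", IDPF_RX_BUFQ_DESC_COUNT(rxd, num_bufq));

	return 0;
}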
This macro divides a given number of RX descriptors by + * number of buffer queues to calculate how many descriptors each buffer queue + * can have without overrunning the RX queue. + * + * If you give hardware more buffers than completion descriptors what will + * happen is that if hardware gets a chance to post more than ring wrap of + * descriptors before SW gets an interrupt and overwrites SW head, the gen bit + * in the descriptor will be wrong. Any overwritten descriptors' buffers will + * be gone forever and SW has no reasonable way to tell that this has happened. + * From SW perspective, when we finally get an interrupt, it looks like we're + * still waiting for descriptor to be done, stalling forever. + */ +#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ)) + +#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1) + +#define IDPF_RX_BUMP_NTC(rxq, ntc) \ +do { \ + if (unlikely(++(ntc) == (rxq)->desc_count)) { \ + ntc = 0; \ + change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ + } \ +} while (0) + +#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \ +do { \ + if (unlikely(++(idx) == (q)->desc_count)) \ + idx = 0; \ +} while (0) + +#define IDPF_RX_HDR_SIZE 256 +#define IDPF_RX_BUF_2048 2048 +#define IDPF_RX_BUF_4096 4096 +#define IDPF_RX_BUF_STRIDE 32 +#define IDPF_RX_BUF_POST_STRIDE 16 +#define IDPF_LOW_WATERMARK 64 +/* Size of header buffer specifically for header split */ +#define IDPF_HDR_BUF_SIZE 256 +#define IDPF_PACKET_HDR_PAD \ + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2) +#define IDPF_TX_TSO_MIN_MSS 88 + +/* Minimum number of descriptors between 2 descriptors with the RE bit set; + * only relevant in flow scheduling mode + */ +#define IDPF_TX_SPLITQ_RE_MIN_GAP 64 + +#define IDPF_RX_BI_BUFID_S 0 +#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) +#define IDPF_RX_BI_GEN_S 15 +#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S) +#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M +#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M + +#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \ + (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i])) +#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \ + (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i])) +#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i]) + +#define IDPF_BASE_TX_DESC(txq, i) \ + (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i])) +#define IDPF_BASE_TX_CTX_DESC(txq, i) \ + (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) +#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ + (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) + +#define IDPF_FLEX_TX_DESC(txq, i) \ + (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) +#define IDPF_FLEX_TX_CTX_DESC(txq, i) \ + (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i])) + +#define IDPF_DESC_UNUSED(txq) \ + ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \ + (txq)->next_to_clean - (txq)->next_to_use - 1) + +#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) +#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ + (txq)->desc_count >> 2) + +#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1) +/* Determine the absolute number of completions pending, i.e. the number of + * completions that are expected to arrive on the TX completion queue. + */ +#define IDPF_TX_COMPLQ_PENDING(txq) \ + (((txq)->num_completions_pending >= (txq)->complq->num_completions ? 
\ + 0 : U64_MAX) + \ + (txq)->num_completions_pending - (txq)->complq->num_completions) + +#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16 +#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1 +/* Adjust the generation for the completion tag and wrap if necessary */ +#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ + ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \ + 0 : (txq)->compl_tag_cur_gen) + +#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS) + +#define IDPF_TX_FLAGS_TSO BIT(0) +#define IDPF_TX_FLAGS_IPV4 BIT(1) +#define IDPF_TX_FLAGS_IPV6 BIT(2) +#define IDPF_TX_FLAGS_TUNNEL BIT(3) + +union idpf_tx_flex_desc { + struct idpf_flex_tx_desc q; /* queue based scheduling */ + struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */ +}; + +/** + * struct idpf_tx_buf + * @next_to_watch: Next descriptor to clean + * @skb: Pointer to the skb + * @dma: DMA address + * @len: DMA length + * @bytecount: Number of bytes + * @gso_segs: Number of GSO segments + * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare + * with completion tag returned in buffer completion event. + * Because the completion tag is expected to be the same in all + * data descriptors for a given packet, and a single packet can + * span multiple buffers, we need this field to track all + * buffers associated with this completion tag independently of + * the buf_id. The tag consists of a N bit buf_id and M upper + * order "generation bits". See compl_tag_bufid_m and + * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1 + * to indicate the tag is not valid. + * @ctx_entry: Singleq only. Used to indicate the corresponding entry + * in the descriptor ring was used for a context descriptor and + * this buffer entry should be skipped. + */ +struct idpf_tx_buf { + void *next_to_watch; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + unsigned int bytecount; + unsigned short gso_segs; + + union { + int compl_tag; + + bool ctx_entry; + }; +}; + +struct idpf_tx_stash { + struct hlist_node hlist; + struct idpf_tx_buf buf; +}; + +/** + * struct idpf_buf_lifo - LIFO for managing OOO completions + * @top: Used to know how many buffers are left + * @size: Total size of LIFO + * @bufs: Backing array + */ +struct idpf_buf_lifo { + u16 top; + u16 size; + struct idpf_tx_stash **bufs; +}; + +/** + * struct idpf_tx_offload_params - Offload parameters for a given packet + * @tx_flags: Feature flags enabled for this packet + * @hdr_offsets: Offset parameter for single queue model + * @cd_tunneling: Type of tunneling enabled for single queue model + * @tso_len: Total length of payload to segment + * @mss: Segment size + * @tso_segs: Number of segments to be sent + * @tso_hdr_len: Length of headers to be duplicated + * @td_cmd: Command field to be inserted into descriptor + */ +struct idpf_tx_offload_params { + u32 tx_flags; + + u32 hdr_offsets; + u32 cd_tunneling; + + u32 tso_len; + u16 mss; + u16 tso_segs; + u16 tso_hdr_len; + + u16 td_cmd; +}; + +/** + * struct idpf_tx_splitq_params + * @dtype: General descriptor info + * @eop_cmd: Type of EOP + * @compl_tag: Associated tag for completion + * @td_tag: Descriptor tunneling tag + * @offload: Offload parameters + */ +struct idpf_tx_splitq_params { + enum idpf_tx_desc_dtype_value dtype; + u16 eop_cmd; + union { + u16 compl_tag; + u16 td_tag; + }; + + struct idpf_tx_offload_params offload; +}; + +enum idpf_tx_ctx_desc_eipt_offload { + IDPF_TX_CTX_EXT_IP_NONE = 0x0, + IDPF_TX_CTX_EXT_IP_IPV6 = 0x1, + 
IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + IDPF_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +/* Checksum offload bits decoded from the receive descriptor. */ +struct idpf_rx_csum_decoded { + u32 l3l4p : 1; + u32 ipe : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 ipv6exadd : 1; + u32 l4e : 1; + u32 pprs : 1; + u32 nat : 1; + u32 raw_csum_inv : 1; + u32 raw_csum : 16; +}; + +struct idpf_rx_extracted { + unsigned int size; + u16 rx_ptype; +}; + +#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256 +#define IDPF_TX_MIN_PKT_LEN 17 +#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1 +#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \ + sizeof(struct idpf_flex_tx_desc)) +#define IDPF_TX_DESCS_FOR_CTX 1 +/* TX descriptors needed, worst case */ +#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \ + IDPF_TX_DESCS_PER_CACHE_LINE + \ + IDPF_TX_DESCS_FOR_SKB_DATA_PTR) + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K +#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1) +#define IDPF_TX_MAX_DESC_DATA_ALIGNED \ + ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) + +#define IDPF_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define IDPF_RX_DESC(rxq, i) \ + (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i])) + +struct idpf_rx_buf { + struct page *page; + unsigned int page_offset; + u16 truesize; +}; + +#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 +#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ + (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS)) +#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info) +#define IDPF_RX_MAX_PTYPES_PER_BUF \ + DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \ + IDPF_RX_MAX_PTYPE_SZ) + +#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count) + +#define IDPF_TUN_IP_GRE (\ + IDPF_PTYPE_TUNNEL_IP |\ + IDPF_PTYPE_TUNNEL_IP_GRENAT) + +#define IDPF_TUN_IP_GRE_MAC (\ + IDPF_TUN_IP_GRE |\ + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC) + +#define IDPF_RX_MAX_PTYPE 1024 +#define IDPF_RX_MAX_BASE_PTYPE 256 +#define IDPF_INVALID_PTYPE_ID 0xFFFF + +/* Packet type non-ip values */ +enum idpf_rx_ptype_l2 { + IDPF_RX_PTYPE_L2_RESERVED = 0, + IDPF_RX_PTYPE_L2_MAC_PAY2 = 1, + IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + IDPF_RX_PTYPE_L2_FIP_PAY2 = 3, + IDPF_RX_PTYPE_L2_OUI_PAY2 = 4, + IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6, + IDPF_RX_PTYPE_L2_ECP_PAY2 = 7, + IDPF_RX_PTYPE_L2_EVB_PAY2 = 8, + IDPF_RX_PTYPE_L2_QCN_PAY2 = 9, + IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10, + IDPF_RX_PTYPE_L2_ARP = 11, +}; + +enum idpf_rx_ptype_outer_ip { + IDPF_RX_PTYPE_OUTER_L2 = 0, + IDPF_RX_PTYPE_OUTER_IP = 1, +}; + +#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \ + (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \ + ((ptype)->outer_ip_ver == (ipv))) + +enum idpf_rx_ptype_outer_ip_ver { + IDPF_RX_PTYPE_OUTER_NONE = 0, + IDPF_RX_PTYPE_OUTER_IPV4 = 1, + IDPF_RX_PTYPE_OUTER_IPV6 = 2, +}; + +enum idpf_rx_ptype_outer_fragmented { + IDPF_RX_PTYPE_NOT_FRAG = 0, + IDPF_RX_PTYPE_FRAG = 1, +}; + +enum idpf_rx_ptype_tunnel_type { + IDPF_RX_PTYPE_TUNNEL_NONE = 0, + IDPF_RX_PTYPE_TUNNEL_IP_IP = 1, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum idpf_rx_ptype_tunnel_end_prot { + IDPF_RX_PTYPE_TUNNEL_END_NONE = 0, + IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1, + 
IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum idpf_rx_ptype_inner_prot { + IDPF_RX_PTYPE_INNER_PROT_NONE = 0, + IDPF_RX_PTYPE_INNER_PROT_UDP = 1, + IDPF_RX_PTYPE_INNER_PROT_TCP = 2, + IDPF_RX_PTYPE_INNER_PROT_SCTP = 3, + IDPF_RX_PTYPE_INNER_PROT_ICMP = 4, + IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5, +}; + +enum idpf_rx_ptype_payload_layer { + IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +enum idpf_tunnel_state { + IDPF_PTYPE_TUNNEL_IP = BIT(0), + IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2), +}; + +struct idpf_ptype_state { + bool outer_ip; + bool outer_frag; + u8 tunnel_state; +}; + +struct idpf_rx_ptype_decoded { + u32 ptype:10; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +/** + * enum idpf_queue_flags_t + * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to + * identify new descriptor writebacks on the ring. HW sets + * the gen bit to 1 on the first writeback of any given + * descriptor. After the ring wraps, HW sets the gen bit of + * those descriptors to 0, and continues flipping + * 0->1 or 1->0 on each ring wrap. SW maintains its own + * gen bit to know what value will indicate writebacks on + * the next pass around the ring. E.g. it is initialized + * to 1 and knows that reading a gen bit of 1 in any + * descriptor on the initial pass of the ring indicates a + * writeback. It also flips on every ring wrap. + * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit + * and RFLGQ_GEN is the SW bit. + * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling + * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions + * @__IDPF_Q_POLL_MODE: Enable poll mode + * @__IDPF_Q_FLAGS_NBITS: Must be last + */ +enum idpf_queue_flags_t { + __IDPF_Q_GEN_CHK, + __IDPF_RFLQ_GEN_CHK, + __IDPF_Q_FLOW_SCH_EN, + __IDPF_Q_SW_MARKER, + __IDPF_Q_POLL_MODE, + + __IDPF_Q_FLAGS_NBITS, +}; + +/** + * struct idpf_vec_regs + * @dyn_ctl_reg: Dynamic control interrupt register offset + * @itrn_reg: Interrupt Throttling Rate register offset + * @itrn_index_spacing: Register spacing between ITR registers of the same + * vector + */ +struct idpf_vec_regs { + u32 dyn_ctl_reg; + u32 itrn_reg; + u32 itrn_index_spacing; +}; + +/** + * struct idpf_intr_reg + * @dyn_ctl: Dynamic control interrupt register + * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable + * @dyn_ctl_itridx_s: Register bit offset for ITR index + * @dyn_ctl_itridx_m: Mask for ITR index + * @dyn_ctl_intrvl_s: Register bit offset for ITR interval + * @rx_itr: RX ITR register + * @tx_itr: TX ITR register + * @icr_ena: Interrupt cause register offset + * @icr_ena_ctlq_m: Mask for ICR + */ +struct idpf_intr_reg { + void __iomem *dyn_ctl; + u32 dyn_ctl_intena_m; + u32 dyn_ctl_itridx_s; + u32 dyn_ctl_itridx_m; + u32 dyn_ctl_intrvl_s; + void __iomem *rx_itr; + void __iomem *tx_itr; + void __iomem *icr_ena; + u32 icr_ena_ctlq_m; +}; + +/** + * struct idpf_q_vector + * @vport: Vport back pointer + * @affinity_mask: CPU affinity mask + * @napi: napi handler + * @v_idx: Vector index + * @intr_reg: See struct idpf_intr_reg + * @num_txq: Number of TX queues + * @tx: Array of TX queues to service + * @tx_dim: Data for TX net_dim algorithm + * @tx_itr_value: TX interrupt throttling rate + * @tx_intr_mode: Dynamic ITR or 
not + * @tx_itr_idx: TX ITR index + * @num_rxq: Number of RX queues + * @rx: Array of RX queues to service + * @rx_dim: Data for RX net_dim algorithm + * @rx_itr_value: RX interrupt throttling rate + * @rx_intr_mode: Dynamic ITR or not + * @rx_itr_idx: RX ITR index + * @num_bufq: Number of buffer queues + * @bufq: Array of buffer queues to service + * @total_events: Number of interrupts processed + * @name: Queue vector name + */ +struct idpf_q_vector { + struct idpf_vport *vport; + cpumask_t affinity_mask; + struct napi_struct napi; + u16 v_idx; + struct idpf_intr_reg intr_reg; + + u16 num_txq; + struct idpf_queue **tx; + struct dim tx_dim; + u16 tx_itr_value; + bool tx_intr_mode; + u32 tx_itr_idx; + + u16 num_rxq; + struct idpf_queue **rx; + struct dim rx_dim; + u16 rx_itr_value; + bool rx_intr_mode; + u32 rx_itr_idx; + + u16 num_bufq; + struct idpf_queue **bufq; + + u16 total_events; + char *name; +}; + +struct idpf_rx_queue_stats { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t rsc_pkts; + u64_stats_t hw_csum_err; + u64_stats_t hsplit_pkts; + u64_stats_t hsplit_buf_ovf; + u64_stats_t bad_descs; +}; + +struct idpf_tx_queue_stats { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t lso_pkts; + u64_stats_t linearize; + u64_stats_t q_busy; + u64_stats_t skb_drops; + u64_stats_t dma_map_errs; +}; + +struct idpf_cleaned_stats { + u32 packets; + u32 bytes; +}; + +union idpf_queue_stats { + struct idpf_rx_queue_stats rx; + struct idpf_tx_queue_stats tx; +}; + +#define IDPF_ITR_DYNAMIC 1 +#define IDPF_ITR_MAX 0x1FE0 +#define IDPF_ITR_20K 0x0032 +#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */ +#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */ +#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK) +#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode) +#define IDPF_ITR_TX_DEF IDPF_ITR_20K +#define IDPF_ITR_RX_DEF IDPF_ITR_20K +/* Index used for 'No ITR' update in DYN_CTL register */ +#define IDPF_NO_ITR_UPDATE_IDX 3 +#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt) +#define IDPF_DIM_DEFAULT_PROFILE_IX 1 + +/** + * struct idpf_queue + * @dev: Device back pointer for DMA mapping + * @vport: Back pointer to associated vport + * @txq_grp: See struct idpf_txq_group + * @rxq_grp: See struct idpf_rxq_group + * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, + * buffer queue uses this index to determine which group of refill queues + * to clean. + * For TX queue, it is used as index to map between TX queue group and + * hot path TX pointers stored in vport. Used in both singleq/splitq. + * For RX queue, it is used to index to total RX queue across groups and + * used for skb reporting. + * @tail: Tail offset. Used for both queue models single and split. In splitq + * model relevant only for TX queue and RX queue. + * @tx_buf: See struct idpf_tx_buf + * @rx_buf: Struct with RX buffer related members + * @rx_buf.buf: See struct idpf_rx_buf + * @rx_buf.hdr_buf_pa: DMA handle + * @rx_buf.hdr_buf_va: Virtual address + * @pp: Page pool pointer + * @skb: Pointer to the skb + * @q_type: Queue type (TX, RX, TX completion, RX buffer) + * @q_id: Queue id + * @desc_count: Number of descriptors + * @next_to_use: Next descriptor to use. Relevant in both split & single txq + * and bufq. + * @next_to_clean: Next descriptor to clean. In split queue model, only + * relevant to TX completion queue and RX queue. + * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model + * only relevant to RX queue. 
+ * @flags: See enum idpf_queue_flags_t + * @q_stats: See union idpf_queue_stats + * @stats_sync: See struct u64_stats_sync + * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on + * the TX completion queue, it can be for any TXQ associated + * with that completion queue. This means we can clean up to + * N TXQs during a single call to clean the completion queue. + * cleaned_bytes|pkts tracks the clean stats per TXQ during + * that single call to clean the completion queue. By doing so, + * we can update BQL with aggregate cleaned stats for each TXQ + * only once at the end of the cleaning routine. + * @cleaned_pkts: Number of packets cleaned for the above said case + * @rx_hsplit_en: RX headsplit enable + * @rx_hbuf_size: Header buffer size + * @rx_buf_size: Buffer size + * @rx_max_pkt_size: RX max packet size + * @rx_buf_stride: RX buffer stride + * @rx_buffer_low_watermark: RX buffer low watermark + * @rxdids: Supported RX descriptor ids + * @q_vector: Backreference to associated vector + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @desc_ring: Descriptor ring memory + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + * @tx_min_pkt_len: Min supported packet length + * @num_completions: Only relevant for TX completion queue. It tracks the + * number of completions received to compare against the + * number of completions pending, as accumulated by the + * TX queues. + * @buf_stack: Stack of empty buffers to store buffer info for out of order + * buffer completions. See struct idpf_buf_lifo. + * @compl_tag_bufid_m: Completion tag buffer id mask + * @compl_tag_gen_s: Completion tag generation bit + * The format of the completion tag will change based on the TXQ + * descriptor ring size so that we can maintain roughly the same level + * of "uniqueness" across all descriptor sizes. For example, if the + * TXQ descriptor ring size is 64 (the minimum size supported), the + * completion tag will be formatted as below: + * 15 6 5 0 + * -------------------------------- + * | GEN=0-1023 |IDX = 0-63| + * -------------------------------- + * + * This gives us 64*1024 = 65536 possible unique values. Similarly, if + * the TXQ descriptor ring size is 8160 (the maximum size supported), + * the completion tag will be formatted as below: + * 15 13 12 0 + * -------------------------------- + * |GEN | IDX = 0-8159 | + * -------------------------------- + * + * This gives us 8*8160 = 65280 possible unique values. 
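+ *
+ * As an illustrative decode of the 64-descriptor layout above (tag value
+ * chosen only as an example): compl_tag_gen_s would be 6 and
+ * compl_tag_bufid_m would be GENMASK(5, 0), so a tag of 0x1E5 splits into
+ * generation (0x1E5 >> 6) = 7 and buffer index (0x1E5 & 0x3F) = 37.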
+ * @compl_tag_cur_gen: Used to keep track of current completion tag generation + * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @sched_buf_hash: Hash table to store buffers + */ +struct idpf_queue { + struct device *dev; + struct idpf_vport *vport; + union { + struct idpf_txq_group *txq_grp; + struct idpf_rxq_group *rxq_grp; + }; + u16 idx; + void __iomem *tail; + union { + struct idpf_tx_buf *tx_buf; + struct { + struct idpf_rx_buf *buf; + dma_addr_t hdr_buf_pa; + void *hdr_buf_va; + } rx_buf; + }; + struct page_pool *pp; + struct sk_buff *skb; + u16 q_type; + u32 q_id; + u16 desc_count; + + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + + union idpf_queue_stats q_stats; + struct u64_stats_sync stats_sync; + + u32 cleaned_bytes; + u16 cleaned_pkts; + + bool rx_hsplit_en; + u16 rx_hbuf_size; + u16 rx_buf_size; + u16 rx_max_pkt_size; + u16 rx_buf_stride; + u8 rx_buffer_low_watermark; + u64 rxdids; + struct idpf_q_vector *q_vector; + unsigned int size; + dma_addr_t dma; + void *desc_ring; + + u16 tx_max_bufs; + u8 tx_min_pkt_len; + + u32 num_completions; + + struct idpf_buf_lifo buf_stack; + + u16 compl_tag_bufid_m; + u16 compl_tag_gen_s; + + u16 compl_tag_cur_gen; + u16 compl_tag_gen_max; + + DECLARE_HASHTABLE(sched_buf_hash, 12); +} ____cacheline_internodealigned_in_smp; + +/** + * struct idpf_sw_queue + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: Buffer to allocate at + * @flags: See enum idpf_queue_flags_t + * @ring: Pointer to the ring + * @desc_count: Descriptor count + * @dev: Device back pointer for DMA mapping + * + * Software queues are used in splitq mode to manage buffers between rxq + * producer and the bufq consumer. These are required in order to maintain a + * lockless buffer management system and are strictly software only constructs. + */ +struct idpf_sw_queue { + u16 next_to_clean; + u16 next_to_alloc; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 *ring; + u16 desc_count; + struct device *dev; +} ____cacheline_internodealigned_in_smp; + +/** + * struct idpf_rxq_set + * @rxq: RX queue + * @refillq0: Pointer to refill queue 0 + * @refillq1: Pointer to refill queue 1 + * + * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs. + * Each rxq needs a refillq to return used buffers back to the respective bufq. + * Bufqs then clean these refillqs for buffers to give to hardware. + */ +struct idpf_rxq_set { + struct idpf_queue rxq; + struct idpf_sw_queue *refillq0; + struct idpf_sw_queue *refillq1; +}; + +/** + * struct idpf_bufq_set + * @bufq: Buffer queue + * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets + * in idpf_rxq_group. + * @refillqs: Pointer to refill queues array. + * + * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs. + * In this bufq_set, there will be one refillq for each rxq in this rxq_group. + * Used buffers received by rxqs will be put on refillqs which bufqs will + * clean to return new buffers back to hardware. + * + * Buffers needed by some number of rxqs associated in this rxq_group are + * managed by at most two bufqs (depending on performance configuration).
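+ *
+ * Illustrative fan-out (queue counts chosen only as an example): a splitq
+ * rxq_group with 4 rxq_sets and 2 bufq_sets would carry 4 refillqs in each
+ * bufq_set (8 in total), and each rxq_set's refillq0/refillq1 would point
+ * at its entry in bufq_sets[0] and bufq_sets[1] respectively.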
+ */ +struct idpf_bufq_set { + struct idpf_queue bufq; + int num_refillqs; + struct idpf_sw_queue *refillqs; +}; + +/** + * struct idpf_rxq_group + * @vport: Vport back pointer + * @singleq: Struct with single queue related members + * @singleq.num_rxq: Number of RX queues associated + * @singleq.rxqs: Array of RX queue pointers + * @splitq: Struct with split queue related members + * @splitq.num_rxq_sets: Number of RX queue sets + * @splitq.rxq_sets: Array of RX queue sets + * @splitq.bufq_sets: Buffer queue set pointer + * + * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a + * rxq_group contains all the rxqs, bufqs and refillqs needed to + * manage buffers in splitq mode. + */ +struct idpf_rxq_group { + struct idpf_vport *vport; + + union { + struct { + u16 num_rxq; + struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; + } singleq; + struct { + u16 num_rxq_sets; + struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q]; + struct idpf_bufq_set *bufq_sets; + } splitq; + }; +}; + +/** + * struct idpf_txq_group + * @vport: Vport back pointer + * @num_txq: Number of TX queues associated + * @txqs: Array of TX queue pointers + * @complq: Associated completion queue pointer, split queue only + * @num_completions_pending: Total number of completions pending for the + * completion queue, accumulated for all TX queues + * associated with that completion queue. + * + * Between singleq and splitq, a txq_group is largely the same except for the + * complq. In splitq a single complq is responsible for handling completions + * for some number of txqs associated in this txq_group. + */ +struct idpf_txq_group { + struct idpf_vport *vport; + + u16 num_txq; + struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; + + struct idpf_queue *complq; + + u32 num_completions_pending; +}; + +/** + * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag + * @size: transmit request size in bytes + * + * In the case where a large frag (>= 16K) needs to be split across multiple + * descriptors, we need to assume that we can have no more than 12K of data + * per descriptor due to hardware alignment restrictions (4K alignment).
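+ *
+ * Worked example (frag size chosen only for illustration): with
+ * IDPF_TX_MAX_DESC_DATA_ALIGNED = ALIGN_DOWN(SZ_16K - 1, SZ_4K) = 12288,
+ * a 45000 byte frag needs DIV_ROUND_UP(45000, 12288) = 4 descriptors.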
+ */ +static inline u32 idpf_size_to_txd_count(unsigned int size) +{ + return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED); +} + +/** + * idpf_tx_singleq_build_ctob - populate command tag offset and size + * @td_cmd: Command to be filled in desc + * @td_offset: Offset to be filled in desc + * @size: Size of the buffer + * @td_tag: td tag to be filled + * + * Returns the 64 bit value populated with the input parameters + */ +static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset, + unsigned int size, u64 td_tag) +{ + return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA | + (td_cmd << IDPF_TXD_QW1_CMD_S) | + (td_offset << IDPF_TXD_QW1_OFFSET_S) | + ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) | + (td_tag << IDPF_TXD_QW1_L2TAG1_S)); +} + +void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size); +void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size); +/** + * idpf_tx_splitq_build_desc - determine which type of data descriptor to build + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2) + idpf_tx_splitq_build_ctb(desc, params, td_cmd, size); + else + idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); +} + +/** + * idpf_alloc_page - Allocate a new RX buffer from the page pool + * @pool: page_pool to allocate from + * @buf: metadata struct to populate with page info + * @buf_size: 2K or 4K + * + * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise. 
+ */ +static inline dma_addr_t idpf_alloc_page(struct page_pool *pool, + struct idpf_rx_buf *buf, + unsigned int buf_size) +{ + if (buf_size == IDPF_RX_BUF_2048) + buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset, + buf_size); + else + buf->page = page_pool_dev_alloc_pages(pool); + + if (!buf->page) + return DMA_MAPPING_ERROR; + + buf->truesize = buf_size; + + return page_pool_get_dma_addr(buf->page) + buf->page_offset + + pool->p.offset; +} + +/** + * idpf_rx_put_page - Return RX buffer page to pool + * @rx_buf: RX buffer metadata struct + */ +static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf) +{ + page_pool_put_page(rx_buf->page->pp, rx_buf->page, + rx_buf->truesize, true); + rx_buf->page = NULL; +} + +/** + * idpf_rx_sync_for_cpu - Synchronize DMA buffer + * @rx_buf: RX buffer metadata struct + * @len: frame length from descriptor + */ +static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len) +{ + struct page *page = rx_buf->page; + struct page_pool *pp = page->pp; + + dma_sync_single_range_for_cpu(pp->p.dev, + page_pool_get_dma_addr(page), + rx_buf->page_offset + pp->p.offset, len, + page_pool_get_dma_dir(pp)); +} + +int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); +void idpf_vport_init_num_qs(struct idpf_vport *vport, + struct virtchnl2_create_vport *vport_msg); +void idpf_vport_calc_num_q_desc(struct idpf_vport *vport); +int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index, + struct virtchnl2_create_vport *vport_msg, + struct idpf_vport_max_q *max_q); +void idpf_vport_calc_num_q_groups(struct idpf_vport *vport); +int idpf_vport_queues_alloc(struct idpf_vport *vport); +void idpf_vport_queues_rel(struct idpf_vport *vport); +void idpf_vport_intr_rel(struct idpf_vport *vport); +int idpf_vport_intr_alloc(struct idpf_vport *vport); +void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); +void idpf_vport_intr_deinit(struct idpf_vport *vport); +int idpf_vport_intr_init(struct idpf_vport *vport); +enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); +int idpf_config_rss(struct idpf_vport *vport); +int idpf_init_rss(struct idpf_vport *vport); +void idpf_deinit_rss(struct idpf_vport *vport); +int idpf_rx_bufs_init_all(struct idpf_vport *vport); +void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size); +struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, + struct idpf_rx_buf *rx_buf, + unsigned int size); +bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf); +void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val); +void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, + bool xmit_more); +unsigned int idpf_size_to_txd_count(unsigned int size); +netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); +void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, + struct idpf_tx_buf *first, u16 ring_idx); +unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, + struct sk_buff *skb); +bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count); +int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size); +void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); +netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, + struct net_device *netdev); +netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, + struct net_device *netdev); +bool idpf_rx_singleq_buf_hw_alloc_all(struct 
idpf_queue *rxq, + u16 cleaned_count); +int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); + +#endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c new file mode 100644 index 0000000000..8ade4e3a9f --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_lan_vf_regs.h" + +#define IDPF_VF_ITR_IDX_SPACING 0x40 + +/** + * idpf_vf_ctlq_reg_init - initialize default mailbox registers + * @cq: pointer to the array of create control queues + */ +static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +{ + int i; + + for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { + struct idpf_ctlq_create_info *ccq = cq + i; + + switch (ccq->type) { + case IDPF_CTLQ_TYPE_MAILBOX_TX: + /* set head and tail registers in our local struct */ + ccq->reg.head = VF_ATQH; + ccq->reg.tail = VF_ATQT; + ccq->reg.len = VF_ATQLEN; + ccq->reg.bah = VF_ATQBAH; + ccq->reg.bal = VF_ATQBAL; + ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M; + ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M; + ccq->reg.head_mask = VF_ATQH_ATQH_M; + break; + case IDPF_CTLQ_TYPE_MAILBOX_RX: + /* set head and tail registers in our local struct */ + ccq->reg.head = VF_ARQH; + ccq->reg.tail = VF_ARQT; + ccq->reg.len = VF_ARQLEN; + ccq->reg.bah = VF_ARQBAH; + ccq->reg.bal = VF_ARQBAL; + ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M; + ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M; + ccq->reg.head_mask = VF_ARQH_ARQH_M; + break; + default: + break; + } + } +} + +/** + * idpf_vf_mb_intr_reg_init - Initialize the mailbox register + * @adapter: adapter structure + */ +static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl); + + intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl); + intr->dyn_ctl_intena_m = VF_INT_DYN_CTL0_INTENA_M; + intr->dyn_ctl_itridx_m = VF_INT_DYN_CTL0_ITR_INDX_M; + intr->icr_ena = idpf_get_reg_addr(adapter, VF_INT_ICR0_ENA1); + intr->icr_ena_ctlq_m = VF_INT_ICR0_ENA1_ADMINQ_M; +} + +/** + * idpf_vf_intr_reg_init - Initialize interrupt registers + * @vport: virtual port structure + */ +static int idpf_vf_intr_reg_init(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int num_vecs = vport->num_q_vectors; + struct idpf_vec_regs *reg_vals; + int num_regs, i, err = 0; + u32 rx_itr, tx_itr; + u16 total_vecs; + + total_vecs = idpf_get_reserved_vecs(vport->adapter); + reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs), + GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + num_regs = idpf_get_reg_intr_vecs(vport, reg_vals); + if (num_regs < num_vecs) { + err = -EINVAL; + goto free_reg_vals; + } + + for (i = 0; i < num_vecs; i++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[i]; + u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC; + struct idpf_intr_reg *intr = &q_vector->intr_reg; + u32 spacing; + + intr->dyn_ctl = idpf_get_reg_addr(adapter, + reg_vals[vec_id].dyn_ctl_reg); + intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M; + intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; + + spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, + IDPF_VF_ITR_IDX_SPACING); + rx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_0, + reg_vals[vec_id].itrn_reg, + spacing); + tx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_1, + 
reg_vals[vec_id].itrn_reg, + spacing); + intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr); + intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr); + } + +free_reg_vals: + kfree(reg_vals); + + return err; +} + +/** + * idpf_vf_reset_reg_init - Initialize reset registers + * @adapter: Driver specific private structure + */ +static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter) +{ + adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT); + adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M; +} + +/** + * idpf_vf_trigger_reset - trigger reset + * @adapter: Driver specific private structure + * @trig_cause: Reason to trigger a reset + */ +static void idpf_vf_trigger_reset(struct idpf_adapter *adapter, + enum idpf_flags trig_cause) +{ + /* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */ + if (trig_cause == IDPF_HR_FUNC_RESET && + !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL); +} + +/** + * idpf_vf_reg_ops_init - Initialize register API function pointers + * @adapter: Driver specific private structure + */ +static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_vf_ctlq_reg_init; + adapter->dev_ops.reg_ops.intr_reg_init = idpf_vf_intr_reg_init; + adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_vf_mb_intr_reg_init; + adapter->dev_ops.reg_ops.reset_reg_init = idpf_vf_reset_reg_init; + adapter->dev_ops.reg_ops.trigger_reset = idpf_vf_trigger_reset; +} + +/** + * idpf_vf_dev_ops_init - Initialize device API function pointers + * @adapter: Driver specific private structure + */ +void idpf_vf_dev_ops_init(struct idpf_adapter *adapter) +{ + idpf_vf_reg_ops_init(adapter); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c new file mode 100644 index 0000000000..2c1b051fdc --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -0,0 +1,3798 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_recv_event_msg - Receive virtchnl event message + * @vport: virtual port structure + * @ctlq_msg: message to copy from + * + * Receive virtchnl event message + */ +static void idpf_recv_event_msg(struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct virtchnl2_event *v2e; + bool link_status; + u32 event; + + v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; + event = le32_to_cpu(v2e->event); + + switch (event) { + case VIRTCHNL2_EVENT_LINK_CHANGE: + vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); + link_status = v2e->link_status; + + if (vport->link_up == link_status) + break; + + vport->link_up = link_status; + if (np->state == __IDPF_VPORT_UP) { + if (vport->link_up) { + netif_carrier_on(vport->netdev); + netif_tx_start_all_queues(vport->netdev); + } else { + netif_tx_stop_all_queues(vport->netdev); + netif_carrier_off(vport->netdev); + } + } + break; + default: + dev_err(&vport->adapter->pdev->dev, + "Unknown event %d from PF\n", event); + break; + } +} + +/** + * idpf_mb_clean - Reclaim the send mailbox queue entries + * @adapter: Driver specific private structure + * + * Reclaim the send mailbox queue entries to be used to send further messages + * + * Returns 0 on success, negative on failure + */ +static int idpf_mb_clean(struct idpf_adapter *adapter) +{ + u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN; + struct 
idpf_ctlq_msg **q_msg; + struct idpf_dma_mem *dma_mem; + int err; + + q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC); + if (!q_msg) + return -ENOMEM; + + err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); + if (err) + goto err_kfree; + + for (i = 0; i < num_q_msg; i++) { + if (!q_msg[i]) + continue; + dma_mem = q_msg[i]->ctx.indirect.payload; + if (dma_mem) + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, + dma_mem->va, dma_mem->pa); + kfree(q_msg[i]); + kfree(dma_mem); + } + +err_kfree: + kfree(q_msg); + + return err; +} + +/** + * idpf_send_mb_msg - Send message over mailbox + * @adapter: Driver specific private structure + * @op: virtchnl opcode + * @msg_size: size of the payload + * @msg: pointer to buffer holding the payload + * + * Will prepare the control queue message and initiates the send api + * + * Returns 0 on success, negative on failure + */ +int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, + u16 msg_size, u8 *msg) +{ + struct idpf_ctlq_msg *ctlq_msg; + struct idpf_dma_mem *dma_mem; + int err; + + /* If we are here and a reset is detected nothing much can be + * done. This thread should silently abort and expected to + * be corrected with a new run either by user or driver + * flows after reset + */ + if (idpf_is_reset_detected(adapter)) + return 0; + + err = idpf_mb_clean(adapter); + if (err) + return err; + + ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC); + if (!ctlq_msg) + return -ENOMEM; + + dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC); + if (!dma_mem) { + err = -ENOMEM; + goto dma_mem_error; + } + + ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; + ctlq_msg->func_id = 0; + ctlq_msg->data_len = msg_size; + ctlq_msg->cookie.mbx.chnl_opcode = op; + ctlq_msg->cookie.mbx.chnl_retval = 0; + dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN; + dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size, + &dma_mem->pa, GFP_ATOMIC); + if (!dma_mem->va) { + err = -ENOMEM; + goto dma_alloc_error; + } + memcpy(dma_mem->va, msg, msg_size); + ctlq_msg->ctx.indirect.payload = dma_mem; + + err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); + if (err) + goto send_error; + + return 0; + +send_error: + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va, + dma_mem->pa); +dma_alloc_error: + kfree(dma_mem); +dma_mem_error: + kfree(ctlq_msg); + + return err; +} + +/** + * idpf_find_vport - Find vport pointer from control queue message + * @adapter: driver specific private structure + * @vport: address of vport pointer to copy the vport from adapters vport list + * @ctlq_msg: control queue message + * + * Return 0 on success, error value on failure. Also this function does check + * for the opcodes which expect to receive payload and return error value if + * it is not the case. 
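+ *
+ * For example, per the switch below, a VIRTCHNL2_OP_ENABLE_VPORT response
+ * carries a struct virtchnl2_vport payload whose vport_id is matched against
+ * adapter->vport_ids[], while adapter-level opcodes such as
+ * VIRTCHNL2_OP_ALLOC_VECTORS carry no vport id and skip the lookup.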
+ */ +static int idpf_find_vport(struct idpf_adapter *adapter, + struct idpf_vport **vport, + struct idpf_ctlq_msg *ctlq_msg) +{ + bool no_op = false, vid_found = false; + int i, err = 0; + char *vc_msg; + u32 v_id; + + vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL); + if (!vc_msg) + return -ENOMEM; + + if (ctlq_msg->data_len) { + size_t payload_size = ctlq_msg->ctx.indirect.payload->size; + + if (!payload_size) { + dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n"); + kfree(vc_msg); + + return -EINVAL; + } + + memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN)); + } + + switch (ctlq_msg->cookie.mbx.chnl_opcode) { + case VIRTCHNL2_OP_VERSION: + case VIRTCHNL2_OP_GET_CAPS: + case VIRTCHNL2_OP_CREATE_VPORT: + case VIRTCHNL2_OP_SET_SRIOV_VFS: + case VIRTCHNL2_OP_ALLOC_VECTORS: + case VIRTCHNL2_OP_DEALLOC_VECTORS: + case VIRTCHNL2_OP_GET_PTYPE_INFO: + goto free_vc_msg; + case VIRTCHNL2_OP_ENABLE_VPORT: + case VIRTCHNL2_OP_DISABLE_VPORT: + case VIRTCHNL2_OP_DESTROY_VPORT: + v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_TX_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_RX_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ENABLE_QUEUES: + case VIRTCHNL2_OP_DISABLE_QUEUES: + case VIRTCHNL2_OP_DEL_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ADD_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: + case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: + v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_STATS: + v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_RSS_LUT: + case VIRTCHNL2_OP_SET_RSS_LUT: + v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_RSS_KEY: + case VIRTCHNL2_OP_SET_RSS_KEY: + v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_EVENT: + v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_LOOPBACK: + v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: + v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ADD_MAC_ADDR: + case VIRTCHNL2_OP_DEL_MAC_ADDR: + v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id); + break; + default: + no_op = true; + break; + } + + if (no_op) + goto free_vc_msg; + + for (i = 0; i < idpf_get_max_vports(adapter); i++) { + if (adapter->vport_ids[i] == v_id) { + vid_found = true; + break; + } + } + + if (vid_found) + *vport = adapter->vports[i]; + else + err = -EINVAL; + +free_vc_msg: + kfree(vc_msg); + + return err; +} + +/** + * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer. + * @adapter: driver specific private structure + * @vport: virtual port structure + * @ctlq_msg: msg to copy from + * @err_enum: err bit to set on error + * + * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success, + * negative on failure. 
+ */ +static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter, + struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg, + enum idpf_vport_vc_state err_enum) +{ + if (ctlq_msg->cookie.mbx.chnl_retval) { + if (vport) + set_bit(err_enum, vport->vc_state); + else + set_bit(err_enum, adapter->vc_state); + + return -EINVAL; + } + + if (vport) + memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(int, ctlq_msg->ctx.indirect.payload->size, + IDPF_CTLQ_MAX_BUF_LEN)); + else + memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(int, ctlq_msg->ctx.indirect.payload->size, + IDPF_CTLQ_MAX_BUF_LEN)); + + return 0; +} + +/** + * idpf_recv_vchnl_op - helper function with common logic when handling the + * reception of VIRTCHNL OPs. + * @adapter: driver specific private structure + * @vport: virtual port structure + * @ctlq_msg: msg to copy from + * @state: state bit used on timeout check + * @err_state: err bit to set on error + */ +static void idpf_recv_vchnl_op(struct idpf_adapter *adapter, + struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_state) +{ + wait_queue_head_t *vchnl_wq; + int err; + + if (vport) + vchnl_wq = &vport->vchnl_wq; + else + vchnl_wq = &adapter->vchnl_wq; + + err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state); + if (wq_has_sleeper(vchnl_wq)) { + if (vport) + set_bit(state, vport->vc_state); + else + set_bit(state, adapter->vc_state); + + wake_up(vchnl_wq); + } else { + if (!err) { + dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n", + ctlq_msg->cookie.mbx.chnl_opcode); + } else { + /* Clear the errors since there is no sleeper to pass + * them on + */ + if (vport) + clear_bit(err_state, vport->vc_state); + else + clear_bit(err_state, adapter->vc_state); + } + } +} + +/** + * idpf_recv_mb_msg - Receive message over mailbox + * @adapter: Driver specific private structure + * @op: virtchannel operation code + * @msg: Received message holding buffer + * @msg_size: message size + * + * Will receive control queue message and posts the receive buffer. Returns 0 + * on success and negative on failure. + */ +int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, + void *msg, int msg_size) +{ + struct idpf_vport *vport = NULL; + struct idpf_ctlq_msg ctlq_msg; + struct idpf_dma_mem *dma_mem; + bool work_done = false; + int num_retry = 2000; + u16 num_q_msg; + int err; + + while (1) { + struct idpf_vport_config *vport_config; + int payload_size = 0; + + /* Try to get one message */ + num_q_msg = 1; + dma_mem = NULL; + err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg); + /* If no message then decide if we have to retry based on + * opcode + */ + if (err || !num_q_msg) { + /* Increasing num_retry to consider the delayed + * responses because of large number of VF's mailbox + * messages. If the mailbox message is received from + * the other side, we come out of the sleep cycle + * immediately else we wait for more time. + */ + if (!op || !num_retry--) + break; + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { + err = -EIO; + break; + } + msleep(20); + continue; + } + + /* If we are here a message is received. Check if we are looking + * for a specific message based on opcode. 
If it is different + * ignore and post buffers + */ + if (op && ctlq_msg.cookie.mbx.chnl_opcode != op) + goto post_buffs; + + err = idpf_find_vport(adapter, &vport, &ctlq_msg); + if (err) + goto post_buffs; + + if (ctlq_msg.data_len) + payload_size = ctlq_msg.ctx.indirect.payload->size; + + /* All conditions are met. Either a message requested is + * received or we received a message to be processed + */ + switch (ctlq_msg.cookie.mbx.chnl_opcode) { + case VIRTCHNL2_OP_VERSION: + case VIRTCHNL2_OP_GET_CAPS: + if (ctlq_msg.cookie.mbx.chnl_retval) { + dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n", + ctlq_msg.cookie.mbx.chnl_opcode, + ctlq_msg.cookie.mbx.chnl_retval); + err = -EBADMSG; + } else if (msg) { + memcpy(msg, ctlq_msg.ctx.indirect.payload->va, + min_t(int, payload_size, msg_size)); + } + work_done = true; + break; + case VIRTCHNL2_OP_CREATE_VPORT: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_CREATE_VPORT, + IDPF_VC_CREATE_VPORT_ERR); + break; + case VIRTCHNL2_OP_ENABLE_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ENA_VPORT, + IDPF_VC_ENA_VPORT_ERR); + break; + case VIRTCHNL2_OP_DISABLE_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DIS_VPORT, + IDPF_VC_DIS_VPORT_ERR); + break; + case VIRTCHNL2_OP_DESTROY_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DESTROY_VPORT, + IDPF_VC_DESTROY_VPORT_ERR); + break; + case VIRTCHNL2_OP_CONFIG_TX_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_CONFIG_TXQ, + IDPF_VC_CONFIG_TXQ_ERR); + break; + case VIRTCHNL2_OP_CONFIG_RX_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_CONFIG_RXQ, + IDPF_VC_CONFIG_RXQ_ERR); + break; + case VIRTCHNL2_OP_ENABLE_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ENA_QUEUES, + IDPF_VC_ENA_QUEUES_ERR); + break; + case VIRTCHNL2_OP_DISABLE_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DIS_QUEUES, + IDPF_VC_DIS_QUEUES_ERR); + break; + case VIRTCHNL2_OP_ADD_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ADD_QUEUES, + IDPF_VC_ADD_QUEUES_ERR); + break; + case VIRTCHNL2_OP_DEL_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DEL_QUEUES, + IDPF_VC_DEL_QUEUES_ERR); + break; + case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_MAP_IRQ, + IDPF_VC_MAP_IRQ_ERR); + break; + case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_UNMAP_IRQ, + IDPF_VC_UNMAP_IRQ_ERR); + break; + case VIRTCHNL2_OP_GET_STATS: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_STATS, + IDPF_VC_GET_STATS_ERR); + break; + case VIRTCHNL2_OP_GET_RSS_LUT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_RSS_LUT, + IDPF_VC_GET_RSS_LUT_ERR); + break; + case VIRTCHNL2_OP_SET_RSS_LUT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_SET_RSS_LUT, + IDPF_VC_SET_RSS_LUT_ERR); + break; + case VIRTCHNL2_OP_GET_RSS_KEY: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_RSS_KEY, + IDPF_VC_GET_RSS_KEY_ERR); + break; + case VIRTCHNL2_OP_SET_RSS_KEY: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_SET_RSS_KEY, + IDPF_VC_SET_RSS_KEY_ERR); + break; + case VIRTCHNL2_OP_SET_SRIOV_VFS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_SET_SRIOV_VFS, + IDPF_VC_SET_SRIOV_VFS_ERR); + break; + case VIRTCHNL2_OP_ALLOC_VECTORS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_ALLOC_VECTORS, + IDPF_VC_ALLOC_VECTORS_ERR); + break; + 
case VIRTCHNL2_OP_DEALLOC_VECTORS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_DEALLOC_VECTORS, + IDPF_VC_DEALLOC_VECTORS_ERR); + break; + case VIRTCHNL2_OP_GET_PTYPE_INFO: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_GET_PTYPE_INFO, + IDPF_VC_GET_PTYPE_INFO_ERR); + break; + case VIRTCHNL2_OP_LOOPBACK: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_LOOPBACK_STATE, + IDPF_VC_LOOPBACK_STATE_ERR); + break; + case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: + /* This message can only be sent asynchronously. As + * such we'll have lost the context in which it was + * called and thus can only really report if it looks + * like an error occurred. Don't bother setting ERR bit + * or waking chnl_wq since no work queue will be waiting + * to read the message. + */ + if (ctlq_msg.cookie.mbx.chnl_retval) { + dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + } + break; + case VIRTCHNL2_OP_ADD_MAC_ADDR: + vport_config = adapter->vport_config[vport->idx]; + if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ, + vport_config->flags)) { + /* Message was sent asynchronously. We don't + * normally print errors here, instead + * prefer to handle errors in the function + * calling wait_for_event. However, if + * asynchronous, the context in which the + * message was sent is lost. We can't really do + * anything about at it this point, but we + * should at a minimum indicate that it looks + * like something went wrong. Also don't bother + * setting ERR bit or waking vchnl_wq since no + * one will be waiting to read the async + * message. + */ + if (ctlq_msg.cookie.mbx.chnl_retval) + dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + break; + } + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ADD_MAC_ADDR, + IDPF_VC_ADD_MAC_ADDR_ERR); + break; + case VIRTCHNL2_OP_DEL_MAC_ADDR: + vport_config = adapter->vport_config[vport->idx]; + if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ, + vport_config->flags)) { + /* Message was sent asynchronously like the + * VIRTCHNL2_OP_ADD_MAC_ADDR + */ + if (ctlq_msg.cookie.mbx.chnl_retval) + dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + break; + } + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DEL_MAC_ADDR, + IDPF_VC_DEL_MAC_ADDR_ERR); + break; + case VIRTCHNL2_OP_EVENT: + idpf_recv_event_msg(vport, &ctlq_msg); + break; + default: + dev_warn(&adapter->pdev->dev, + "Unhandled virtchnl response %d\n", + ctlq_msg.cookie.mbx.chnl_opcode); + break; + } + +post_buffs: + if (ctlq_msg.data_len) + dma_mem = ctlq_msg.ctx.indirect.payload; + else + num_q_msg = 0; + + err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq, + &num_q_msg, &dma_mem); + /* If post failed clear the only buffer we supplied */ + if (err && dma_mem) + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, + dma_mem->va, dma_mem->pa); + + /* Applies only if we are looking for a specific opcode */ + if (work_done) + break; + } + + return err; +} + +/** + * __idpf_wait_for_event - wrapper function for wait on virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout + * @err_check: check if this specific error bit is set + * @timeout: Max time to wait + * + * Checks if state is set upon expiry of timeout. Returns 0 on success, + * negative on failure. 
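+ *
+ * Illustrative timing (IDPF_MAX_WAIT value assumed only for illustration):
+ * with timeout = 2000 ms and IDPF_MAX_WAIT = 500 ms, the wait is split into
+ * 2000 / 500 = 4 sleeps of up to 500 ms each, with a reset check before
+ * every sleep so a dead mailbox does not stall for the full timeout.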
+ */ +static int __idpf_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check, + int timeout) +{ + int time_to_wait, num_waits; + wait_queue_head_t *vchnl_wq; + unsigned long *vc_state; + + time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT); + num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT); + + if (vport) { + vchnl_wq = &vport->vchnl_wq; + vc_state = vport->vc_state; + } else { + vchnl_wq = &adapter->vchnl_wq; + vc_state = adapter->vc_state; + } + + while (num_waits) { + int event; + + /* If we are here and a reset is detected do not wait but + * return. Reset timing is out of drivers control. So + * while we are cleaning resources as part of reset if the + * underlying HW mailbox is gone, wait on mailbox messages + * is not meaningful + */ + if (idpf_is_reset_detected(adapter)) + return 0; + + event = wait_event_timeout(*vchnl_wq, + test_and_clear_bit(state, vc_state), + msecs_to_jiffies(time_to_wait)); + if (event) { + if (test_and_clear_bit(err_check, vc_state)) { + dev_err(&adapter->pdev->dev, "VC response error %s\n", + idpf_vport_vc_state_str[err_check]); + + return -EINVAL; + } + + return 0; + } + num_waits--; + } + + /* Timeout occurred */ + dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n", + idpf_vport_vc_state_str[state]); + + return -ETIMEDOUT; +} + +/** + * idpf_min_wait_for_event - wait for virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout + * @err_check: check if this specific error bit is set + * + * Returns 0 on success, negative on failure. + */ +static int idpf_min_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check) +{ + return __idpf_wait_for_event(adapter, vport, state, err_check, + IDPF_WAIT_FOR_EVENT_TIMEO_MIN); +} + +/** + * idpf_wait_for_event - wait for virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout after 500ms + * @err_check: check if this specific error bit is set + * + * Returns 0 on success, negative on failure. + */ +static int idpf_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check) +{ + /* Increasing the timeout in __IDPF_INIT_SW flow to consider large + * number of VF's mailbox message responses. When a message is received + * on mailbox, this thread is woken up by the idpf_recv_mb_msg before + * the timeout expires. Only in the error case i.e. if no message is + * received on mailbox, we wait for the complete timeout which is + * less likely to happen. + */ + return __idpf_wait_for_event(adapter, vport, state, err_check, + IDPF_WAIT_FOR_EVENT_TIMEO); +} + +/** + * idpf_wait_for_marker_event - wait for software marker response + * @vport: virtual port data structure + * + * Returns 0 success, negative on failure. 
+ **/ +static int idpf_wait_for_marker_event(struct idpf_vport *vport) +{ + int event; + int i; + + for (i = 0; i < vport->num_txq; i++) + set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); + + event = wait_event_timeout(vport->sw_marker_wq, + test_and_clear_bit(IDPF_VPORT_SW_MARKER, + vport->flags), + msecs_to_jiffies(500)); + + for (i = 0; i < vport->num_txq; i++) + clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + + if (event) + return 0; + + dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n"); + + return -ETIMEDOUT; +} + +/** + * idpf_send_ver_msg - send virtchnl version message + * @adapter: Driver specific private structure + * + * Send virtchnl version message. Returns 0 on success, negative on failure. + */ +static int idpf_send_ver_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_version_info vvi; + + if (adapter->virt_ver_maj) { + vvi.major = cpu_to_le32(adapter->virt_ver_maj); + vvi.minor = cpu_to_le32(adapter->virt_ver_min); + } else { + vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR); + vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR); + } + + return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi), + (u8 *)&vvi); +} + +/** + * idpf_recv_ver_msg - Receive virtchnl version message + * @adapter: Driver specific private structure + * + * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need + * to send version message again, otherwise negative on failure. + */ +static int idpf_recv_ver_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_version_info vvi; + u32 major, minor; + int err; + + err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi, + sizeof(vvi)); + if (err) + return err; + + major = le32_to_cpu(vvi.major); + minor = le32_to_cpu(vvi.minor); + + if (major > IDPF_VIRTCHNL_VERSION_MAJOR) { + dev_warn(&adapter->pdev->dev, + "Virtchnl major version (%d) greater than supported\n", + major); + + return -EINVAL; + } + + if (major == IDPF_VIRTCHNL_VERSION_MAJOR && + minor > IDPF_VIRTCHNL_VERSION_MINOR) + dev_warn(&adapter->pdev->dev, + "Virtchnl minor version (%d) didn't match\n", minor); + + /* If we have a mismatch, resend version to update receiver on what + * version we will use. + */ + if (!adapter->virt_ver_maj && + major != IDPF_VIRTCHNL_VERSION_MAJOR && + minor != IDPF_VIRTCHNL_VERSION_MINOR) + err = -EAGAIN; + + adapter->virt_ver_maj = major; + adapter->virt_ver_min = minor; + + return err; +} + +/** + * idpf_send_get_caps_msg - Send virtchnl get capabilities message + * @adapter: Driver specific private structure + * + * Send virtchnl get capabilities message. Returns 0 on success, negative on + * failure.
+ */ +static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_capabilities caps = { }; + + caps.csum_caps = + cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL | + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_GENERIC); + + caps.seg_caps = + cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP | + VIRTCHNL2_CAP_SEG_IPV4_UDP | + VIRTCHNL2_CAP_SEG_IPV4_SCTP | + VIRTCHNL2_CAP_SEG_IPV6_TCP | + VIRTCHNL2_CAP_SEG_IPV6_UDP | + VIRTCHNL2_CAP_SEG_IPV6_SCTP | + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); + + caps.rss_caps = + cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | + VIRTCHNL2_CAP_RSS_IPV4_UDP | + VIRTCHNL2_CAP_RSS_IPV4_SCTP | + VIRTCHNL2_CAP_RSS_IPV4_OTHER | + VIRTCHNL2_CAP_RSS_IPV6_TCP | + VIRTCHNL2_CAP_RSS_IPV6_UDP | + VIRTCHNL2_CAP_RSS_IPV6_SCTP | + VIRTCHNL2_CAP_RSS_IPV6_OTHER); + + caps.hsplit_caps = + cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6); + + caps.rsc_caps = + cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP | + VIRTCHNL2_CAP_RSC_IPV6_TCP); + + caps.other_caps = + cpu_to_le64(VIRTCHNL2_CAP_SRIOV | + VIRTCHNL2_CAP_MACFILTER | + VIRTCHNL2_CAP_SPLITQ_QSCHED | + VIRTCHNL2_CAP_PROMISC | + VIRTCHNL2_CAP_LOOPBACK); + + return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps), + (u8 *)&caps); +} + +/** + * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message + * @adapter: Driver specific private structure + * + * Receive virtchnl get capabilities message. Returns 0 on success, negative on + * failure. 
+ */ +static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter) +{ + return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps, + sizeof(struct virtchnl2_get_capabilities)); +} + +/** + * idpf_vport_alloc_max_qs - Allocate max queues for a vport + * @adapter: Driver specific private structure + * @max_q: vport max queue structure + */ +int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; + struct virtchnl2_get_capabilities *caps = &adapter->caps; + u16 default_vports = idpf_get_default_vports(adapter); + int max_rx_q, max_tx_q; + + mutex_lock(&adapter->queue_lock); + + max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; + max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; + if (adapter->num_alloc_vports < default_vports) { + max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q); + max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q); + } else { + max_q->max_rxq = IDPF_MIN_Q; + max_q->max_txq = IDPF_MIN_Q; + } + max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP; + max_q->max_complq = max_q->max_txq; + + if (avail_queues->avail_rxq < max_q->max_rxq || + avail_queues->avail_txq < max_q->max_txq || + avail_queues->avail_bufq < max_q->max_bufq || + avail_queues->avail_complq < max_q->max_complq) { + mutex_unlock(&adapter->queue_lock); + + return -EINVAL; + } + + avail_queues->avail_rxq -= max_q->max_rxq; + avail_queues->avail_txq -= max_q->max_txq; + avail_queues->avail_bufq -= max_q->max_bufq; + avail_queues->avail_complq -= max_q->max_complq; + + mutex_unlock(&adapter->queue_lock); + + return 0; +} + +/** + * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport + * @adapter: Driver specific private structure + * @max_q: vport max queue structure + */ +void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_avail_queue_info *avail_queues; + + mutex_lock(&adapter->queue_lock); + avail_queues = &adapter->avail_queues; + + avail_queues->avail_rxq += max_q->max_rxq; + avail_queues->avail_txq += max_q->max_txq; + avail_queues->avail_bufq += max_q->max_bufq; + avail_queues->avail_complq += max_q->max_complq; + + mutex_unlock(&adapter->queue_lock); +} + +/** + * idpf_init_avail_queues - Initialize available queues on the device + * @adapter: Driver specific private structure + */ +static void idpf_init_avail_queues(struct idpf_adapter *adapter) +{ + struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; + struct virtchnl2_get_capabilities *caps = &adapter->caps; + + avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); + avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); + avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); + avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); +} + +/** + * idpf_get_reg_intr_vecs - Get vector queue register offset + * @vport: virtual port structure + * @reg_vals: Register offsets to store in + * + * Returns number of registers that got populated + */ +int idpf_get_reg_intr_vecs(struct idpf_vport *vport, + struct idpf_vec_regs *reg_vals) +{ + struct virtchnl2_vector_chunks *chunks; + struct idpf_vec_regs reg_val; + u16 num_vchunks, num_vec; + int num_regs = 0, i, j; + + chunks = &vport->adapter->req_vec_chunks->vchunks; + num_vchunks = le16_to_cpu(chunks->num_vchunks); + + for (j = 0; j < num_vchunks; j++) { + struct virtchnl2_vector_chunk *chunk; + u32 dynctl_reg_spacing; + u32 itrn_reg_spacing; + + chunk = 
&chunks->vchunks[j]; + num_vec = le16_to_cpu(chunk->num_vectors); + reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); + reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); + reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); + + dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); + itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); + + for (i = 0; i < num_vec; i++) { + reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg; + reg_vals[num_regs].itrn_reg = reg_val.itrn_reg; + reg_vals[num_regs].itrn_index_spacing = + reg_val.itrn_index_spacing; + + reg_val.dyn_ctl_reg += dynctl_reg_spacing; + reg_val.itrn_reg += itrn_reg_spacing; + num_regs++; + } + } + + return num_regs; +} + +/** + * idpf_vport_get_q_reg - Get the queue registers for the vport + * @reg_vals: register values needing to be set + * @num_regs: amount we expect to fill + * @q_type: queue model + * @chunks: queue regs received over mailbox + * + * This function parses the queue register offsets from the queue register + * chunk information, with a specific queue type and stores it into the array + * passed as an argument. It returns the actual number of queue registers that + * are filled. + */ +static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, + struct virtchnl2_queue_reg_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_chunks); + int reg_filled = 0, i; + u32 reg_val; + + while (num_chunks--) { + struct virtchnl2_queue_reg_chunk *chunk; + u16 num_q; + + chunk = &chunks->chunks[num_chunks]; + if (le32_to_cpu(chunk->type) != q_type) + continue; + + num_q = le32_to_cpu(chunk->num_queues); + reg_val = le64_to_cpu(chunk->qtail_reg_start); + for (i = 0; i < num_q && reg_filled < num_regs ; i++) { + reg_vals[reg_filled++] = reg_val; + reg_val += le32_to_cpu(chunk->qtail_reg_spacing); + } + } + + return reg_filled; +} + +/** + * __idpf_queue_reg_init - initialize queue registers + * @vport: virtual port structure + * @reg_vals: registers we are initializing + * @num_regs: how many registers there are in total + * @q_type: queue model + * + * Return number of queues that are initialized + */ +static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, + int num_regs, u32 q_type) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_queue *q; + int i, j, k = 0; + + switch (q_type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) + tx_qgrp->txqs[j]->tail = + idpf_get_reg_addr(adapter, reg_vals[k]); + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq && k < num_regs; j++, k++) { + q = rx_qgrp->singleq.rxqs[j]; + q->tail = idpf_get_reg_addr(adapter, + reg_vals[k]); + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u8 num_bufqs = vport->num_bufqs_per_qgrp; + + for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->tail = idpf_get_reg_addr(adapter, + reg_vals[k]); + } + } + break; + default: + break; + } + + return k; +} + +/** + * idpf_queue_reg_init - initialize queue registers + * @vport: virtual port structure + * + * Return 0 on success, negative on failure + */ +int 
idpf_queue_reg_init(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int num_regs, ret = 0; + u32 *reg_vals; + + /* We may never deal with more than 256 same type of queues */ + reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + vport_config = vport->adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = vport->adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + /* Initialize Tx queue tail register address */ + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_TX, + chunks); + if (num_regs < vport->num_txq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_TX); + if (num_regs < vport->num_txq) { + ret = -EINVAL; + goto free_reg_vals; + } + + /* Initialize Rx/buffer queue tail register address based on Rx queue + * model + */ + if (idpf_is_queue_model_split(vport->rxq_model)) { + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, + chunks); + if (num_regs < vport->num_bufq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); + if (num_regs < vport->num_bufq) { + ret = -EINVAL; + goto free_reg_vals; + } + } else { + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_RX, + chunks); + if (num_regs < vport->num_rxq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_RX); + if (num_regs < vport->num_rxq) { + ret = -EINVAL; + goto free_reg_vals; + } + } + +free_reg_vals: + kfree(reg_vals); + + return ret; +} + +/** + * idpf_send_create_vport_msg - Send virtchnl create vport message + * @adapter: Driver specific private structure + * @max_q: vport max queue info + * + * send virtchnl creae vport message + * + * Returns 0 on success, negative on failure + */ +int idpf_send_create_vport_msg(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct virtchnl2_create_vport *vport_msg; + u16 idx = adapter->next_vport; + int err, buf_size; + + buf_size = sizeof(struct virtchnl2_create_vport); + if (!adapter->vport_params_reqd[idx]) { + adapter->vport_params_reqd[idx] = kzalloc(buf_size, + GFP_KERNEL); + if (!adapter->vport_params_reqd[idx]) + return -ENOMEM; + } + + vport_msg = adapter->vport_params_reqd[idx]; + vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); + vport_msg->vport_index = cpu_to_le16(idx); + + if (adapter->req_tx_splitq) + vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + else + vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + + if (adapter->req_rx_splitq) + vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + else + vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + + err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q); + if (err) { + dev_err(&adapter->pdev->dev, "Enough queues are not available"); + + return err; + } + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, 
buf_size, + (u8 *)vport_msg); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT, + IDPF_VC_CREATE_VPORT_ERR); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to receive create vport message"); + + goto rel_lock; + } + + if (!adapter->vport_params_recvd[idx]) { + adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, + GFP_KERNEL); + if (!adapter->vport_params_recvd[idx]) { + err = -ENOMEM; + goto rel_lock; + } + } + + vport_msg = adapter->vport_params_recvd[idx]; + memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_check_supported_desc_ids - Verify we have required descriptor support + * @vport: virtual port structure + * + * Return 0 on success, error on failure + */ +int idpf_check_supported_desc_ids(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_msg; + u64 rx_desc_ids, tx_desc_ids; + + vport_msg = adapter->vport_params_recvd[vport->idx]; + + rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); + tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); + + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { + dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); + vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); + } + } else { + if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) + vport->base_rxd = true; + } + + if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) + return 0; + + if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { + dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); + vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); + } + + return 0; +} + +/** + * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message + * @vport: virtual port data structure + * + * Send virtchnl destroy vport message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_destroy_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT, + IDPF_VC_DESTROY_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_enable_vport_msg - Send virtchnl enable vport message + * @vport: virtual port data structure + * + * Send enable vport virtchnl message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_enable_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT, + IDPF_VC_ENA_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_disable_vport_msg - Send virtchnl disable vport message + * @vport: virtual port data structure + * + * Send disable vport virtchnl message. 
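Each of these vport operations follows the same request/response shape: take the vc_buf_lock, post the message on the mailbox, block until the matching completion or error flag fires, then copy the reply out of the shared vc_msg buffer before dropping the lock. A small userspace sketch of that shape, with a synchronous stub standing in for the send-plus-wait step and none of the driver's real types:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t vc_buf_lock = PTHREAD_MUTEX_INITIALIZER;
static char vc_msg[64];                 /* stand-in for the shared reply buffer */

static int fake_mailbox(int op, char *reply, size_t len)
{
        snprintf(reply, len, "reply-to-op-%d", op);   /* pretend device answer */
        return 0;
}

/* lock, "send", "wait", copy reply out, unlock */
static int send_and_wait(int op, char *out, size_t len)
{
        int err;

        pthread_mutex_lock(&vc_buf_lock);
        err = fake_mailbox(op, vc_msg, sizeof(vc_msg));
        if (!err)
                memcpy(out, vc_msg, len);
        pthread_mutex_unlock(&vc_buf_lock);
        return err;
}

int main(void)
{
        char reply[64];

        if (!send_and_wait(42, reply, sizeof(reply)))
                printf("%s\n", reply);
        return 0;
}

Holding the lock across the copy is the point of the pattern: the reply buffer is shared per adapter or per vport, so the response has to be consumed before another caller can reuse it.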
Returns 0 on success, negative on + * failure. + */ +int idpf_send_disable_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT, + IDPF_VC_DIS_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message + * @vport: virtual port data structure + * + * Send config tx queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) +{ + struct virtchnl2_config_tx_queues *ctq; + u32 config_sz, chunk_sz, buf_sz; + int totqs, num_msgs, num_chunks; + struct virtchnl2_txq_info *qi; + int err = 0, i, k = 0; + + totqs = vport->num_txq + vport->num_complq; + qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); + if (!qi) + return -ENOMEM; + + /* Populate the queue info buffer with all queue context info */ + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + int j, sched_mode; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + qi[k].queue_id = + cpu_to_le32(tx_qgrp->txqs[j]->q_id); + qi[k].model = + cpu_to_le16(vport->txq_model); + qi[k].type = + cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qi[k].ring_len = + cpu_to_le16(tx_qgrp->txqs[j]->desc_count); + qi[k].dma_ring_addr = + cpu_to_le64(tx_qgrp->txqs[j]->dma); + if (idpf_is_queue_model_split(vport->txq_model)) { + struct idpf_queue *q = tx_qgrp->txqs[j]; + + qi[k].tx_compl_queue_id = + cpu_to_le16(tx_qgrp->complq->q_id); + qi[k].relative_queue_id = cpu_to_le16(j); + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); + else + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); + } else { + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); + } + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); + qi[k].model = cpu_to_le16(vport->txq_model); + qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; + else + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; + qi[k].sched_mode = cpu_to_le16(sched_mode); + + k++; + } + + /* Make sure accounting agrees */ + if (k != totqs) { + err = -EINVAL; + goto error; + } + + /* Chunk up the queue contexts into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + config_sz = sizeof(struct virtchnl2_config_tx_queues); + chunk_sz = sizeof(struct virtchnl2_txq_info); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + totqs); + num_msgs = DIV_ROUND_UP(totqs, num_chunks); + + buf_sz = struct_size(ctq, qinfo, num_chunks); + ctq = kzalloc(buf_sz, GFP_KERNEL); + if (!ctq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(ctq, 0, buf_sz); + ctq->vport_id = cpu_to_le32(vport->vport_id); + ctq->num_qinfo = 
cpu_to_le16(num_chunks); + memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(vport->adapter, + VIRTCHNL2_OP_CONFIG_TX_QUEUES, + buf_sz, (u8 *)ctq); + if (err) + goto mbx_error; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_CONFIG_TXQ, + IDPF_VC_CONFIG_TXQ_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + totqs -= num_chunks; + num_chunks = min(num_chunks, totqs); + /* Recalculate buffer size */ + buf_sz = struct_size(ctq, qinfo, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(ctq); +error: + kfree(qi); + + return err; +} + +/** + * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message + * @vport: virtual port data structure + * + * Send config rx queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) +{ + struct virtchnl2_config_rx_queues *crq; + u32 config_sz, chunk_sz, buf_sz; + int totqs, num_msgs, num_chunks; + struct virtchnl2_rxq_info *qi; + int err = 0, i, k = 0; + + totqs = vport->num_rxq + vport->num_bufq; + qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); + if (!qi) + return -ENOMEM; + + /* Populate the queue info buffer with all queue context info */ + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + int j; + + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto setup_rxqs; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { + struct idpf_queue *bufq = + &rx_qgrp->splitq.bufq_sets[j].bufq; + + qi[k].queue_id = cpu_to_le32(bufq->q_id); + qi[k].model = cpu_to_le16(vport->rxq_model); + qi[k].type = cpu_to_le32(bufq->q_type); + qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); + qi[k].ring_len = cpu_to_le16(bufq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); + qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); + qi[k].buffer_notif_stride = bufq->rx_buf_stride; + qi[k].rx_buffer_low_watermark = + cpu_to_le16(bufq->rx_buffer_low_watermark); + if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); + } + +setup_rxqs: + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + struct idpf_queue *rxq; + + if (!idpf_is_queue_model_split(vport->rxq_model)) { + rxq = rx_qgrp->singleq.rxqs[j]; + goto common_qi_fields; + } + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; + qi[k].rx_bufq1_id = + cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); + if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { + qi[k].bufq2_ena = IDPF_BUFQ2_ENA; + qi[k].rx_bufq2_id = + cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); + } + qi[k].rx_buffer_low_watermark = + cpu_to_le16(rxq->rx_buffer_low_watermark); + if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); + +common_qi_fields: + if (rxq->rx_hsplit_en) { + qi[k].qflags |= + cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); + qi[k].hdr_buffer_size = + cpu_to_le16(rxq->rx_hbuf_size); + } + qi[k].queue_id = cpu_to_le32(rxq->q_id); + qi[k].model = cpu_to_le16(vport->rxq_model); + qi[k].type = cpu_to_le32(rxq->q_type); + qi[k].ring_len = cpu_to_le16(rxq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); + qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); + qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); + 
qi[k].qflags |= + cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); + qi[k].desc_ids = cpu_to_le64(rxq->rxdids); + } + } + + /* Make sure accounting agrees */ + if (k != totqs) { + err = -EINVAL; + goto error; + } + + /* Chunk up the queue contexts into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + config_sz = sizeof(struct virtchnl2_config_rx_queues); + chunk_sz = sizeof(struct virtchnl2_rxq_info); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + totqs); + num_msgs = DIV_ROUND_UP(totqs, num_chunks); + + buf_sz = struct_size(crq, qinfo, num_chunks); + crq = kzalloc(buf_sz, GFP_KERNEL); + if (!crq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(crq, 0, buf_sz); + crq->vport_id = cpu_to_le32(vport->vport_id); + crq->num_qinfo = cpu_to_le16(num_chunks); + memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(vport->adapter, + VIRTCHNL2_OP_CONFIG_RX_QUEUES, + buf_sz, (u8 *)crq); + if (err) + goto mbx_error; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_CONFIG_RXQ, + IDPF_VC_CONFIG_RXQ_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + totqs -= num_chunks; + num_chunks = min(num_chunks, totqs); + /* Recalculate buffer size */ + buf_sz = struct_size(crq, qinfo, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(crq); +error: + kfree(qi); + + return err; +} + +/** + * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable + * queues message + * @vport: virtual port data structure + * @vc_op: virtchnl op code to send + * + * Send enable or disable queues virtchnl message. Returns 0 on success, + * negative on failure. + */ +static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) +{ + u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_del_ena_dis_queues *eq; + struct virtchnl2_queue_chunks *qcs; + struct virtchnl2_queue_chunk *qc; + u32 config_sz, chunk_sz, buf_sz; + int i, j, k = 0, err = 0; + + /* validate virtchnl op */ + switch (vc_op) { + case VIRTCHNL2_OP_ENABLE_QUEUES: + case VIRTCHNL2_OP_DISABLE_QUEUES: + break; + default: + return -EINVAL; + } + + num_txq = vport->num_txq + vport->num_complq; + num_rxq = vport->num_rxq + vport->num_bufq; + num_q = num_txq + num_rxq; + buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; + qc = kzalloc(buf_sz, GFP_KERNEL); + if (!qc) + return -ENOMEM; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_txq != k) { + err = -EINVAL; + goto error; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + goto setup_rx; + + for (i = 0; i < vport->num_txq_grp; i++, k++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + if (vport->num_complq != (k - vport->num_txq)) { + err = -EINVAL; + goto error; + } + +setup_rx: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) + 
num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + if (idpf_is_queue_model_split(vport->rxq_model)) { + qc[k].start_queue_id = + cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); + qc[k].type = + cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); + } else { + qc[k].start_queue_id = + cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); + qc[k].type = + cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); + } + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) { + err = -EINVAL; + goto error; + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto send_msg; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { + struct idpf_queue *q; + + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + qc[k].type = cpu_to_le32(q->q_type); + qc[k].start_queue_id = cpu_to_le32(q->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_bufq != k - (vport->num_txq + + vport->num_complq + + vport->num_rxq)) { + err = -EINVAL; + goto error; + } + +send_msg: + /* Chunk up the queue info into multiple messages */ + config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); + chunk_sz = sizeof(struct virtchnl2_queue_chunk); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + num_q); + num_msgs = DIV_ROUND_UP(num_q, num_chunks); + + buf_sz = struct_size(eq, chunks.chunks, num_chunks); + eq = kzalloc(buf_sz, GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(eq, 0, buf_sz); + eq->vport_id = cpu_to_le32(vport->vport_id); + eq->chunks.num_chunks = cpu_to_le16(num_chunks); + qcs = &eq->chunks; + memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq); + if (err) + goto mbx_error; + + if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES) + err = idpf_wait_for_event(adapter, vport, + IDPF_VC_ENA_QUEUES, + IDPF_VC_ENA_QUEUES_ERR); + else + err = idpf_min_wait_for_event(adapter, vport, + IDPF_VC_DIS_QUEUES, + IDPF_VC_DIS_QUEUES_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + num_q -= num_chunks; + num_chunks = min(num_chunks, num_q); + /* Recalculate buffer size */ + buf_sz = struct_size(eq, chunks.chunks, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(eq); +error: + kfree(qc); + + return err; +} + +/** + * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue + * vector message + * @vport: virtual port data structure + * @map: true for map and false for unmap + * + * Send map or unmap queue vector virtchnl message. Returns 0 on success, + * negative on failure. 
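The send loop above caps each mailbox message at however many fixed-size chunks fit alongside the message header (the role IDPF_NUM_CHUNKS_PER_MSG plays), walks the full array in DIV_ROUND_UP(total, per-message) passes, and shrinks the final message to the remainder while recomputing the buffer size each pass. The arithmetic in isolation, with a 4096-byte buffer and made-up struct sizes assumed purely for illustration:

#include <stdio.h>

#define MAX_BUF_LEN 4096                       /* assumed mailbox buffer size */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int hdr_sz = 24, chunk_sz = 32;   /* made-up struct sizes */
        unsigned int total = 300;                  /* queues to describe */
        unsigned int per_msg = (MAX_BUF_LEN - hdr_sz) / chunk_sz;
        unsigned int num_chunks = total < per_msg ? total : per_msg;
        unsigned int num_msgs = DIV_ROUND_UP(total, num_chunks);
        unsigned int sent = 0, i;

        for (i = 0; i < num_msgs; i++) {
                printf("msg %u: %u chunks, %u byte buffer\n",
                       i, num_chunks, hdr_sz + num_chunks * chunk_sz);
                sent += num_chunks;
                total -= num_chunks;
                num_chunks = num_chunks < total ? num_chunks : total;
        }
        printf("described %u queues in %u messages\n", sent, num_msgs);
        return 0;
}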
+ */ +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_queue_vector_maps *vqvm; + struct virtchnl2_queue_vector *vqv; + u32 config_sz, chunk_sz, buf_sz; + u32 num_msgs, num_chunks, num_q; + int i, j, k = 0, err = 0; + + num_q = vport->num_txq + vport->num_rxq; + + buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; + vqv = kzalloc(buf_sz, GFP_KERNEL); + if (!vqv) + return -ENOMEM; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); + + if (idpf_is_queue_model_split(vport->txq_model)) { + vqv[k].vector_id = + cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); + vqv[k].itr_idx = + cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); + } else { + vqv[k].vector_id = + cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); + vqv[k].itr_idx = + cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); + } + } + } + + if (vport->num_txq != k) { + err = -EINVAL; + goto error; + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + struct idpf_queue *rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + rxq = rx_qgrp->singleq.rxqs[j]; + + vqv[k].queue_type = cpu_to_le32(rxq->q_type); + vqv[k].queue_id = cpu_to_le32(rxq->q_id); + vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); + vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); + } + } + + if (idpf_is_queue_model_split(vport->txq_model)) { + if (vport->num_rxq != k - vport->num_complq) { + err = -EINVAL; + goto error; + } + } else { + if (vport->num_rxq != k - vport->num_txq) { + err = -EINVAL; + goto error; + } + } + + /* Chunk up the vector info into multiple messages */ + config_sz = sizeof(struct virtchnl2_queue_vector_maps); + chunk_sz = sizeof(struct virtchnl2_queue_vector); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + num_q); + num_msgs = DIV_ROUND_UP(num_q, num_chunks); + + buf_sz = struct_size(vqvm, qv_maps, num_chunks); + vqvm = kzalloc(buf_sz, GFP_KERNEL); + if (!vqvm) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(vqvm, 0, buf_sz); + vqvm->vport_id = cpu_to_le32(vport->vport_id); + vqvm->num_qv_maps = cpu_to_le16(num_chunks); + memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); + + if (map) { + err = idpf_send_mb_msg(adapter, + VIRTCHNL2_OP_MAP_QUEUE_VECTOR, + buf_sz, (u8 *)vqvm); + if (!err) + err = idpf_wait_for_event(adapter, vport, + IDPF_VC_MAP_IRQ, + IDPF_VC_MAP_IRQ_ERR); + } else { + err = idpf_send_mb_msg(adapter, + VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, + buf_sz, (u8 *)vqvm); + if (!err) + err = + idpf_min_wait_for_event(adapter, vport, + IDPF_VC_UNMAP_IRQ, + IDPF_VC_UNMAP_IRQ_ERR); + } + if (err) + goto mbx_error; + + k += num_chunks; + num_q -= num_chunks; + num_chunks = min(num_chunks, num_q); + /* Recalculate buffer size */ + buf_sz = struct_size(vqvm, qv_maps, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(vqvm); +error: + kfree(vqv); + + return err; +} + +/** + * 
idpf_send_enable_queues_msg - send enable queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send enable queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_enable_queues_msg(struct idpf_vport *vport) +{ + return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES); +} + +/** + * idpf_send_disable_queues_msg - send disable queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send disable queues virtchnl message. Returns 0 on success, negative + * on failure. + */ +int idpf_send_disable_queues_msg(struct idpf_vport *vport) +{ + int err, i; + + err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES); + if (err) + return err; + + /* switch to poll mode as interrupts will be disabled after disable + * queues virtchnl message is sent + */ + for (i = 0; i < vport->num_txq; i++) + set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + + /* schedule the napi to receive all the marker packets */ + for (i = 0; i < vport->num_q_vectors; i++) + napi_schedule(&vport->q_vectors[i].napi); + + return idpf_wait_for_marker_event(vport); +} + +/** + * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right + * structure + * @dchunks: Destination chunks to store data to + * @schunks: Source chunks to copy data from + * @num_chunks: number of chunks to copy + */ +static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, + struct virtchnl2_queue_reg_chunk *schunks, + u16 num_chunks) +{ + u16 i; + + for (i = 0; i < num_chunks; i++) { + dchunks[i].type = schunks[i].type; + dchunks[i].start_queue_id = schunks[i].start_queue_id; + dchunks[i].num_queues = schunks[i].num_queues; + } +} + +/** + * idpf_send_delete_queues_msg - send delete queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send delete queues virtchnl message. Return 0 on success, negative on + * failure. + */ +int idpf_send_delete_queues_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct virtchnl2_del_ena_dis_queues *eq; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int buf_size, err; + u16 num_chunks; + + vport_config = adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + num_chunks = le16_to_cpu(chunks->num_chunks); + buf_size = struct_size(eq, chunks.chunks, num_chunks); + + eq = kzalloc(buf_size, GFP_KERNEL); + if (!eq) + return -ENOMEM; + + eq->vport_id = cpu_to_le32(vport->vport_id); + eq->chunks.num_chunks = cpu_to_le16(num_chunks); + + idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, + num_chunks); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES, + buf_size, (u8 *)eq); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES, + IDPF_VC_DEL_QUEUES_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + kfree(eq); + + return err; +} + +/** + * idpf_send_config_queues_msg - Send config queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send config queues virtchnl message. 
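idpf_convert_reg_to_queue_chunks() above copies the chunk fields straight across with no le/cpu conversion, because both the source register chunks and the destination queue chunks are little-endian wire structures. A host-side sketch of the same field-for-field copy, with simplified struct layouts assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the two wire structures */
struct reg_chunk { uint32_t type, start_queue_id, num_queues, tail_reg; };
struct q_chunk   { uint32_t type, start_queue_id, num_queues; };

static void convert(struct q_chunk *dst, const struct reg_chunk *src, int n)
{
        for (int i = 0; i < n; i++) {
                dst[i].type = src[i].type;                     /* verbatim copy, */
                dst[i].start_queue_id = src[i].start_queue_id; /* no byte swap */
                dst[i].num_queues = src[i].num_queues;
        }
}

int main(void)
{
        struct reg_chunk src[2] = { { 1, 0, 8, 0x1000 }, { 2, 8, 8, 0x2000 } };
        struct q_chunk dst[2];

        convert(dst, src, 2);
        printf("chunk0: type %u start %u num %u\n",
               dst[0].type, dst[0].start_queue_id, dst[0].num_queues);
        return 0;
}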
Returns 0 on success, negative on + * failure. + */ +int idpf_send_config_queues_msg(struct idpf_vport *vport) +{ + int err; + + err = idpf_send_config_tx_queues_msg(vport); + if (err) + return err; + + return idpf_send_config_rx_queues_msg(vport); +} + +/** + * idpf_send_add_queues_msg - Send virtchnl add queues message + * @vport: Virtual port private data structure + * @num_tx_q: number of transmit queues + * @num_complq: number of transmit completion queues + * @num_rx_q: number of receive queues + * @num_rx_bufq: number of receive buffer queues + * + * Returns 0 on success, negative on failure. vport _MUST_ be const here as + * we should not change any fields within vport itself in this function. + */ +int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, + u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + struct virtchnl2_add_queues aq = { }; + struct virtchnl2_add_queues *vc_msg; + u16 vport_idx = vport->idx; + int size, err; + + vport_config = adapter->vport_config[vport_idx]; + + aq.vport_id = cpu_to_le32(vport->vport_id); + aq.num_tx_q = cpu_to_le16(num_tx_q); + aq.num_tx_complq = cpu_to_le16(num_complq); + aq.num_rx_q = cpu_to_le16(num_rx_q); + aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); + + mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES, + sizeof(struct virtchnl2_add_queues), (u8 *)&aq); + if (err) + goto rel_lock; + + /* We want vport to be const to prevent incidental code changes making + * changes to the vport config. We're making a special exception here + * to discard const to use the virtchnl. + */ + err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport, + IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR); + if (err) + goto rel_lock; + + kfree(vport_config->req_qs_chunks); + vport_config->req_qs_chunks = NULL; + + vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg; + /* compare vc_msg num queues with vport num queues */ + if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || + le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || + le16_to_cpu(vc_msg->num_tx_complq) != num_complq || + le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) { + err = -EINVAL; + goto rel_lock; + } + + size = struct_size(vc_msg, chunks.chunks, + le16_to_cpu(vc_msg->chunks.num_chunks)); + vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); + if (!vport_config->req_qs_chunks) { + err = -ENOMEM; + goto rel_lock; + } + +rel_lock: + mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock); + + return err; +} + +/** + * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message + * @adapter: Driver specific private structure + * @num_vectors: number of vectors to be allocated + * + * Returns 0 on success, negative on failure. 
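The ADD_QUEUES handler above only caches the device's reply once every granted count matches the request exactly; a partial grant is rejected with -EINVAL rather than silently accepted. That check in isolation, using hypothetical field names:

#include <stdio.h>

struct q_request { unsigned tx, complq, rx, bufq; };

/* accept the reply only if every count matches what was asked for */
static int validate_grant(const struct q_request *req,
                          const struct q_request *granted)
{
        if (granted->tx != req->tx || granted->rx != req->rx ||
            granted->complq != req->complq || granted->bufq != req->bufq)
                return -1;            /* the driver returns -EINVAL here */
        return 0;
}

int main(void)
{
        struct q_request req = { 4, 4, 4, 8 };
        struct q_request bad = { 4, 4, 2, 8 };   /* device granted fewer rx */

        printf("exact grant: %d\n", validate_grant(&req, &req));
        printf("partial grant: %d\n", validate_grant(&req, &bad));
        return 0;
}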
+ */ +int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) +{ + struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec; + struct virtchnl2_alloc_vectors ac = { }; + u16 num_vchunks; + int size, err; + + ac.num_vectors = cpu_to_le16(num_vectors); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS, + sizeof(ac), (u8 *)&ac); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS, + IDPF_VC_ALLOC_VECTORS_ERR); + if (err) + goto rel_lock; + + rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg; + num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); + + size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); + if (size > sizeof(adapter->vc_msg)) { + err = -EINVAL; + goto rel_lock; + } + + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL); + if (!adapter->req_vec_chunks) { + err = -ENOMEM; + goto rel_lock; + } + + alloc_vec = adapter->req_vec_chunks; + if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) { + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + err = -EINVAL; + } + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_send_dealloc_vectors_msg - Send virtchnl de allocate vectors message + * @adapter: Driver specific private structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; + struct virtchnl2_vector_chunks *vcs = &ac->vchunks; + int buf_size, err; + + buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size, + (u8 *)vcs); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS, + IDPF_VC_DEALLOC_VECTORS_ERR); + if (err) + goto rel_lock; + + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_get_max_vfs - Get max number of vfs supported + * @adapter: Driver specific private structure + * + * Returns max number of VFs + */ +static int idpf_get_max_vfs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_sriov_vfs); +} + +/** + * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message + * @adapter: Driver specific private structure + * @num_vfs: number of virtual functions to be created + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) +{ + struct virtchnl2_sriov_vfs_info svi = { }; + int err; + + svi.num_vfs = cpu_to_le16(num_vfs); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS, + sizeof(svi), (u8 *)&svi); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS, + IDPF_VC_SET_SRIOV_VFS_ERR); + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_send_get_stats_msg - Send virtchnl get statistics message + * @vport: vport to get stats for + * + * Returns 0 on success, negative on failure. 
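Several of the replies handled above end in flexible arrays (vector chunks, queue chunks), so their real size is computed with struct_size() before the buffer is duplicated with kmemdup(), and rejected if it would not fit in the fixed vc_msg buffer. A userspace equivalent of that sizing and bounds check, with a toy chunk type and an assumed 4096-byte reply buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vchunk { unsigned start, num; };
struct vchunks_reply {
        unsigned short num_vchunks;
        struct vchunk vchunks[];        /* flexible array member */
};

#define VC_MSG_MAX 4096                 /* assumed fixed reply buffer size */

int main(void)
{
        unsigned short n = 3;
        size_t size = sizeof(struct vchunks_reply) + n * sizeof(struct vchunk);

        if (size > VC_MSG_MAX) {        /* mirrors the -EINVAL bail-out */
                fprintf(stderr, "reply larger than message buffer\n");
                return 1;
        }

        struct vchunks_reply *copy = malloc(size);   /* kmemdup() stand-in */
        if (!copy)
                return 1;
        memset(copy, 0, size);
        copy->num_vchunks = n;
        printf("cached %zu bytes for %u vector chunks\n", size, n);
        free(copy);
        return 0;
}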
+ */ +int idpf_send_get_stats_msg(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct rtnl_link_stats64 *netstats = &np->netstats; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport_stats stats_msg = { }; + struct virtchnl2_vport_stats *stats; + int err; + + /* Don't send get_stats message if the link is down */ + if (np->state <= __IDPF_VPORT_DOWN) + return 0; + + stats_msg.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS, + sizeof(struct virtchnl2_vport_stats), + (u8 *)&stats_msg); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS, + IDPF_VC_GET_STATS_ERR); + if (err) + goto rel_lock; + + stats = (struct virtchnl2_vport_stats *)vport->vc_msg; + + spin_lock_bh(&np->stats_lock); + + netstats->rx_packets = le64_to_cpu(stats->rx_unicast) + + le64_to_cpu(stats->rx_multicast) + + le64_to_cpu(stats->rx_broadcast); + netstats->rx_bytes = le64_to_cpu(stats->rx_bytes); + netstats->rx_dropped = le64_to_cpu(stats->rx_discards); + netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop); + netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length); + + netstats->tx_packets = le64_to_cpu(stats->tx_unicast) + + le64_to_cpu(stats->tx_multicast) + + le64_to_cpu(stats->tx_broadcast); + netstats->tx_bytes = le64_to_cpu(stats->tx_bytes); + netstats->tx_errors = le64_to_cpu(stats->tx_errors); + netstats->tx_dropped = le64_to_cpu(stats->tx_discards); + + vport->port_stats.vport_stats = *stats; + + spin_unlock_bh(&np->stats_lock); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message + * @vport: virtual port data structure + * @get: flag to set or get rss look up table + * + * Returns 0 on success, negative on failure. 
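The statistics handler above folds the device's per-class counters (unicast, multicast, broadcast) into the single rx_packets/tx_packets totals that rtnl_link_stats64 expects, converting each 64-bit field from little-endian first. The aggregation itself, sketched host-endian with simplified struct fields:

#include <stdint.h>
#include <stdio.h>

struct vport_stats { uint64_t rx_uc, rx_mc, rx_bc, rx_bytes, rx_discards; };
struct link_stats  { uint64_t rx_packets, rx_bytes, rx_dropped; };

static void fold_stats(struct link_stats *net, const struct vport_stats *hw)
{
        /* the driver applies le64_to_cpu() to each field before summing */
        net->rx_packets = hw->rx_uc + hw->rx_mc + hw->rx_bc;
        net->rx_bytes   = hw->rx_bytes;
        net->rx_dropped = hw->rx_discards;
}

int main(void)
{
        struct vport_stats hw = { 1000, 50, 5, 1500000, 3 };
        struct link_stats net;

        fold_stats(&net, &hw);
        printf("rx_packets=%llu rx_bytes=%llu dropped=%llu\n",
               (unsigned long long)net.rx_packets,
               (unsigned long long)net.rx_bytes,
               (unsigned long long)net.rx_dropped);
        return 0;
}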
+ */ +int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_lut *recv_rl; + struct idpf_rss_data *rss_data; + struct virtchnl2_rss_lut *rl; + int buf_size, lut_buf_size; + int i, err; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + buf_size = struct_size(rl, lut, rss_data->rss_lut_size); + rl = kzalloc(buf_size, GFP_KERNEL); + if (!rl) + return -ENOMEM; + + rl->vport_id = cpu_to_le32(vport->vport_id); + mutex_lock(&vport->vc_buf_lock); + + if (!get) { + rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); + for (i = 0; i < rss_data->rss_lut_size; i++) + rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT, + buf_size, (u8 *)rl); + if (err) + goto free_mem; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT, + IDPF_VC_SET_RSS_LUT_ERR); + + goto free_mem; + } + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT, + buf_size, (u8 *)rl); + if (err) + goto free_mem; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT, + IDPF_VC_GET_RSS_LUT_ERR); + if (err) + goto free_mem; + + recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg; + if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) + goto do_memcpy; + + rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); + kfree(rss_data->rss_lut); + + lut_buf_size = rss_data->rss_lut_size * sizeof(u32); + rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); + if (!rss_data->rss_lut) { + rss_data->rss_lut_size = 0; + err = -ENOMEM; + goto free_mem; + } + +do_memcpy: + memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size); +free_mem: + mutex_unlock(&vport->vc_buf_lock); + kfree(rl); + + return err; +} + +/** + * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message + * @vport: virtual port data structure + * @get: flag to set or get rss look up table + * + * Returns 0 on success, negative on failure + */ +int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_key *recv_rk; + struct idpf_rss_data *rss_data; + struct virtchnl2_rss_key *rk; + int i, buf_size, err; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); + rk = kzalloc(buf_size, GFP_KERNEL); + if (!rk) + return -ENOMEM; + + rk->vport_id = cpu_to_le32(vport->vport_id); + mutex_lock(&vport->vc_buf_lock); + + if (get) { + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY, + buf_size, (u8 *)rk); + if (err) + goto error; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY, + IDPF_VC_GET_RSS_KEY_ERR); + if (err) + goto error; + + recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg; + if (rss_data->rss_key_size != + le16_to_cpu(recv_rk->key_len)) { + rss_data->rss_key_size = + min_t(u16, NETDEV_RSS_KEY_LEN, + le16_to_cpu(recv_rk->key_len)); + kfree(rss_data->rss_key); + rss_data->rss_key = kzalloc(rss_data->rss_key_size, + GFP_KERNEL); + if (!rss_data->rss_key) { + rss_data->rss_key_size = 0; + err = -ENOMEM; + goto error; + } + } + memcpy(rss_data->rss_key, recv_rk->key_flex, + rss_data->rss_key_size); + } else { + rk->key_len = cpu_to_le16(rss_data->rss_key_size); + for (i = 0; i < rss_data->rss_key_size; i++) + rk->key_flex[i] = rss_data->rss_key[i]; + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY, + buf_size, (u8 *)rk); + if (err) + goto 
error; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY, + IDPF_VC_SET_RSS_KEY_ERR); + } + +error: + mutex_unlock(&vport->vc_buf_lock); + kfree(rk); + + return err; +} + +/** + * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table + * @ptype: ptype lookup table + * @pstate: state machine for ptype lookup table + * @ipv4: ipv4 or ipv6 + * @frag: fragmentation allowed + * + */ +static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, + struct idpf_ptype_state *pstate, + bool ipv4, bool frag) +{ + if (!pstate->outer_ip || !pstate->outer_frag) { + ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; + pstate->outer_ip = true; + + if (ipv4) + ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; + else + ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; + + if (frag) { + ptype->outer_frag = IDPF_RX_PTYPE_FRAG; + pstate->outer_frag = true; + } + } else { + ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; + pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; + + if (ipv4) + ptype->tunnel_end_prot = + IDPF_RX_PTYPE_TUNNEL_END_IPV4; + else + ptype->tunnel_end_prot = + IDPF_RX_PTYPE_TUNNEL_END_IPV6; + + if (frag) + ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; + } +} + +/** + * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info + * @vport: virtual port data structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) +{ + struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; + struct virtchnl2_get_ptype_info get_ptype_info; + int max_ptype, ptypes_recvd = 0, ptype_offset; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_get_ptype_info *ptype_info; + u16 next_ptype_id = 0; + int err = 0, i, j, k; + + if (idpf_is_queue_model_split(vport->rxq_model)) + max_ptype = IDPF_RX_MAX_PTYPE; + else + max_ptype = IDPF_RX_MAX_BASE_PTYPE; + + memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + + ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!ptype_info) + return -ENOMEM; + + mutex_lock(&adapter->vc_buf_lock); + + while (next_ptype_id < max_ptype) { + get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id); + + if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) + get_ptype_info.num_ptypes = + cpu_to_le16(max_ptype - next_ptype_id); + else + get_ptype_info.num_ptypes = + cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO, + sizeof(struct virtchnl2_get_ptype_info), + (u8 *)&get_ptype_info); + if (err) + goto vc_buf_unlock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO, + IDPF_VC_GET_PTYPE_INFO_ERR); + if (err) + goto vc_buf_unlock; + + memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + + ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); + if (ptypes_recvd > max_ptype) { + err = -EINVAL; + goto vc_buf_unlock; + } + + next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) + + le16_to_cpu(get_ptype_info.num_ptypes); + + ptype_offset = IDPF_RX_PTYPE_HDR_SZ; + + for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { + struct idpf_ptype_state pstate = { }; + struct virtchnl2_ptype *ptype; + u16 id; + + ptype = (struct virtchnl2_ptype *) + ((u8 *)ptype_info + ptype_offset); + + ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); + if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) { + err = -EINVAL; + goto vc_buf_unlock; + } + + /* 0xFFFF indicates end of ptypes */ + if (le16_to_cpu(ptype->ptype_id_10) == + IDPF_INVALID_PTYPE_ID) { + err = 0; + goto vc_buf_unlock; + } + + if 
(idpf_is_queue_model_split(vport->rxq_model)) + k = le16_to_cpu(ptype->ptype_id_10); + else + k = ptype->ptype_id_8; + + if (ptype->proto_id_count) + ptype_lkup[k].known = 1; + + for (j = 0; j < ptype->proto_id_count; j++) { + id = le16_to_cpu(ptype->proto_id[j]); + switch (id) { + case VIRTCHNL2_PROTO_HDR_GRE: + if (pstate.tunnel_state == + IDPF_PTYPE_TUNNEL_IP) { + ptype_lkup[k].tunnel_type = + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; + pstate.tunnel_state |= + IDPF_PTYPE_TUNNEL_IP_GRENAT; + } + break; + case VIRTCHNL2_PROTO_HDR_MAC: + ptype_lkup[k].outer_ip = + IDPF_RX_PTYPE_OUTER_L2; + if (pstate.tunnel_state == + IDPF_TUN_IP_GRE) { + ptype_lkup[k].tunnel_type = + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; + pstate.tunnel_state |= + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; + } + break; + case VIRTCHNL2_PROTO_HDR_IPV4: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, true, + false); + break; + case VIRTCHNL2_PROTO_HDR_IPV6: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, false, + false); + break; + case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, true, + true); + break; + case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, false, + true); + break; + case VIRTCHNL2_PROTO_HDR_UDP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_UDP; + break; + case VIRTCHNL2_PROTO_HDR_TCP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_TCP; + break; + case VIRTCHNL2_PROTO_HDR_SCTP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_SCTP; + break; + case VIRTCHNL2_PROTO_HDR_ICMP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_ICMP; + break; + case VIRTCHNL2_PROTO_HDR_PAY: + ptype_lkup[k].payload_layer = + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; + break; + case VIRTCHNL2_PROTO_HDR_ICMPV6: + case VIRTCHNL2_PROTO_HDR_IPV6_EH: + case VIRTCHNL2_PROTO_HDR_PRE_MAC: + case VIRTCHNL2_PROTO_HDR_POST_MAC: + case VIRTCHNL2_PROTO_HDR_ETHERTYPE: + case VIRTCHNL2_PROTO_HDR_SVLAN: + case VIRTCHNL2_PROTO_HDR_CVLAN: + case VIRTCHNL2_PROTO_HDR_MPLS: + case VIRTCHNL2_PROTO_HDR_MMPLS: + case VIRTCHNL2_PROTO_HDR_PTP: + case VIRTCHNL2_PROTO_HDR_CTRL: + case VIRTCHNL2_PROTO_HDR_LLDP: + case VIRTCHNL2_PROTO_HDR_ARP: + case VIRTCHNL2_PROTO_HDR_ECP: + case VIRTCHNL2_PROTO_HDR_EAPOL: + case VIRTCHNL2_PROTO_HDR_PPPOD: + case VIRTCHNL2_PROTO_HDR_PPPOE: + case VIRTCHNL2_PROTO_HDR_IGMP: + case VIRTCHNL2_PROTO_HDR_AH: + case VIRTCHNL2_PROTO_HDR_ESP: + case VIRTCHNL2_PROTO_HDR_IKE: + case VIRTCHNL2_PROTO_HDR_NATT_KEEP: + case VIRTCHNL2_PROTO_HDR_L2TPV2: + case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: + case VIRTCHNL2_PROTO_HDR_L2TPV3: + case VIRTCHNL2_PROTO_HDR_GTP: + case VIRTCHNL2_PROTO_HDR_GTP_EH: + case VIRTCHNL2_PROTO_HDR_GTPCV2: + case VIRTCHNL2_PROTO_HDR_GTPC_TEID: + case VIRTCHNL2_PROTO_HDR_GTPU: + case VIRTCHNL2_PROTO_HDR_GTPU_UL: + case VIRTCHNL2_PROTO_HDR_GTPU_DL: + case VIRTCHNL2_PROTO_HDR_ECPRI: + case VIRTCHNL2_PROTO_HDR_VRRP: + case VIRTCHNL2_PROTO_HDR_OSPF: + case VIRTCHNL2_PROTO_HDR_TUN: + case VIRTCHNL2_PROTO_HDR_NVGRE: + case VIRTCHNL2_PROTO_HDR_VXLAN: + case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: + case VIRTCHNL2_PROTO_HDR_GENEVE: + case VIRTCHNL2_PROTO_HDR_NSH: + case VIRTCHNL2_PROTO_HDR_QUIC: + case VIRTCHNL2_PROTO_HDR_PFCP: + case VIRTCHNL2_PROTO_HDR_PFCP_NODE: + case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: + case VIRTCHNL2_PROTO_HDR_RTP: + case VIRTCHNL2_PROTO_HDR_NO_PROTO: + break; + default: + break; + } + } + } + } + +vc_buf_unlock: + mutex_unlock(&adapter->vc_buf_lock); + kfree(ptype_info); + + return err; +} + +/** + * 
idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback + * message + * @vport: virtual port data structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) +{ + struct virtchnl2_loopback loopback; + int err; + + loopback.vport_id = cpu_to_le32(vport->vport_id); + loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK, + sizeof(loopback), (u8 *)&loopback); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_LOOPBACK_STATE, + IDPF_VC_LOOPBACK_STATE_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_find_ctlq - Given a type and id, find ctlq info + * @hw: hardware struct + * @type: type of ctrlq to find + * @id: ctlq id to find + * + * Returns pointer to found ctlq info struct, NULL otherwise. + */ +static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, + enum idpf_ctlq_type type, int id) +{ + struct idpf_ctlq_info *cq, *tmp; + + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + if (cq->q_id == id && cq->cq_type == type) + return cq; + + return NULL; +} + +/** + * idpf_init_dflt_mbx - Setup default mailbox parameters and make request + * @adapter: adapter info struct + * + * Returns 0 on success, negative otherwise + */ +int idpf_init_dflt_mbx(struct idpf_adapter *adapter) +{ + struct idpf_ctlq_create_info ctlq_info[] = { + { + .type = IDPF_CTLQ_TYPE_MAILBOX_TX, + .id = IDPF_DFLT_MBX_ID, + .len = IDPF_DFLT_MBX_Q_LEN, + .buf_size = IDPF_CTLQ_MAX_BUF_LEN + }, + { + .type = IDPF_CTLQ_TYPE_MAILBOX_RX, + .id = IDPF_DFLT_MBX_ID, + .len = IDPF_DFLT_MBX_Q_LEN, + .buf_size = IDPF_CTLQ_MAX_BUF_LEN + } + }; + struct idpf_hw *hw = &adapter->hw; + int err; + + adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); + + err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); + if (err) + return err; + + hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, + IDPF_DFLT_MBX_ID); + hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, + IDPF_DFLT_MBX_ID); + + if (!hw->asq || !hw->arq) { + idpf_ctlq_deinit(hw); + + return -ENOENT; + } + + adapter->state = __IDPF_STARTUP; + + return 0; +} + +/** + * idpf_deinit_dflt_mbx - Free up ctlqs setup + * @adapter: Driver specific private data structure + */ +void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) +{ + if (adapter->hw.arq && adapter->hw.asq) { + idpf_mb_clean(adapter); + idpf_ctlq_deinit(&adapter->hw); + } + adapter->hw.arq = NULL; + adapter->hw.asq = NULL; +} + +/** + * idpf_vport_params_buf_rel - Release memory for MailBox resources + * @adapter: Driver specific private data structure + * + * Will release memory to hold the vport parameters received on MailBox + */ +static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) +{ + kfree(adapter->vport_params_recvd); + adapter->vport_params_recvd = NULL; + kfree(adapter->vport_params_reqd); + adapter->vport_params_reqd = NULL; + kfree(adapter->vport_ids); + adapter->vport_ids = NULL; +} + +/** + * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources + * @adapter: Driver specific private data structure + * + * Will alloc memory to hold the vport parameters received on MailBox + */ +static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) +{ + u16 num_max_vports = idpf_get_max_vports(adapter); + + adapter->vport_params_reqd = kcalloc(num_max_vports, + 
sizeof(*adapter->vport_params_reqd), + GFP_KERNEL); + if (!adapter->vport_params_reqd) + return -ENOMEM; + + adapter->vport_params_recvd = kcalloc(num_max_vports, + sizeof(*adapter->vport_params_recvd), + GFP_KERNEL); + if (!adapter->vport_params_recvd) + goto err_mem; + + adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); + if (!adapter->vport_ids) + goto err_mem; + + if (adapter->vport_config) + return 0; + + adapter->vport_config = kcalloc(num_max_vports, + sizeof(*adapter->vport_config), + GFP_KERNEL); + if (!adapter->vport_config) + goto err_mem; + + return 0; + +err_mem: + idpf_vport_params_buf_rel(adapter); + + return -ENOMEM; +} + +/** + * idpf_vc_core_init - Initialize state machine and get driver specific + * resources + * @adapter: Driver specific private structure + * + * This function will initialize the state machine and request all necessary + * resources required by the device driver. Once the state machine is + * initialized, allocate memory to store vport specific information and also + * requests required interrupts. + * + * Returns 0 on success, -EAGAIN function will get called again, + * otherwise negative on failure. + */ +int idpf_vc_core_init(struct idpf_adapter *adapter) +{ + int task_delay = 30; + u16 num_max_vports; + int err = 0; + + while (adapter->state != __IDPF_INIT_SW) { + switch (adapter->state) { + case __IDPF_STARTUP: + if (idpf_send_ver_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_VER_CHECK; + goto restart; + case __IDPF_VER_CHECK: + err = idpf_recv_ver_msg(adapter); + if (err == -EIO) { + return err; + } else if (err == -EAGAIN) { + adapter->state = __IDPF_STARTUP; + goto restart; + } else if (err) { + goto init_failed; + } + if (idpf_send_get_caps_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_GET_CAPS; + goto restart; + case __IDPF_GET_CAPS: + if (idpf_recv_get_caps_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_INIT_SW; + break; + default: + dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", + adapter->state); + goto init_failed; + } + break; +restart: + /* Give enough time before proceeding further with + * state machine + */ + msleep(task_delay); + } + + pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); + num_max_vports = idpf_get_max_vports(adapter); + adapter->max_vports = num_max_vports; + adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), + GFP_KERNEL); + if (!adapter->vports) + return -ENOMEM; + + if (!adapter->netdevs) { + adapter->netdevs = kcalloc(num_max_vports, + sizeof(struct net_device *), + GFP_KERNEL); + if (!adapter->netdevs) { + err = -ENOMEM; + goto err_netdev_alloc; + } + } + + err = idpf_vport_params_buf_alloc(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", + err); + goto err_netdev_alloc; + } + + /* Start the mailbox task before requesting vectors. This will ensure + * vector information response from mailbox is handled + */ + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + + queue_delayed_work(adapter->serv_wq, &adapter->serv_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + err = idpf_intr_req(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", + err); + goto err_intr_req; + } + + idpf_init_avail_queues(adapter); + + /* Skew the delay for init tasks for each function based on fn number + * to prevent every function from making the same call simultaneously. 
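idpf_vc_core_init() above walks a small handshake state machine (STARTUP, VER_CHECK, GET_CAPS, INIT_SW), dropping back to STARTUP when the version exchange asks for another round and bailing out on hard failures. The control flow reduced to a standalone loop, with stubbed send/receive steps standing in for the mailbox calls:

#include <stdio.h>

enum state { STARTUP, VER_CHECK, GET_CAPS, INIT_SW };

/* stub handshake steps: 0 = success, <0 = hard failure, >0 = retry */
static int send_ver(void)  { return 0; }
static int recv_ver(void)  { return 0; }
static int send_caps(void) { return 0; }
static int recv_caps(void) { return 0; }

int main(void)
{
        enum state s = STARTUP;

        while (s != INIT_SW) {
                switch (s) {
                case STARTUP:
                        if (send_ver())
                                return 1;
                        s = VER_CHECK;
                        break;
                case VER_CHECK: {
                        int r = recv_ver();

                        if (r < 0)
                                return 1;
                        if (r > 0) {            /* version mismatch: restart */
                                s = STARTUP;
                                break;
                        }
                        if (send_caps())
                                return 1;
                        s = GET_CAPS;
                        break;
                }
                case GET_CAPS:
                        if (recv_caps())
                                return 1;
                        s = INIT_SW;
                        break;
                default:
                        return 1;
                }
        }
        printf("handshake complete\n");
        return 0;
}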
+ */ + queue_delayed_work(adapter->init_wq, &adapter->init_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + goto no_err; + +err_intr_req: + cancel_delayed_work_sync(&adapter->serv_task); + cancel_delayed_work_sync(&adapter->mbx_task); + idpf_vport_params_buf_rel(adapter); +err_netdev_alloc: + kfree(adapter->vports); + adapter->vports = NULL; +no_err: + return err; + +init_failed: + /* Don't retry if we're trying to go down, just bail. */ + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + return err; + + if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { + dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); + + return -EFAULT; + } + /* If it reached here, it is possible that mailbox queue initialization + * register writes might not have taken effect. Retry to initialize + * the mailbox again + */ + adapter->state = __IDPF_STARTUP; + idpf_deinit_dflt_mbx(adapter); + set_bit(IDPF_HR_DRV_LOAD, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, + msecs_to_jiffies(task_delay)); + + return -EAGAIN; +} + +/** + * idpf_vc_core_deinit - Device deinit routine + * @adapter: Driver specific private structure + * + */ +void idpf_vc_core_deinit(struct idpf_adapter *adapter) +{ + int i; + + idpf_deinit_task(adapter); + idpf_intr_rel(adapter); + /* Set all bits as we dont know on which vc_state the vhnl_wq is + * waiting on and wakeup the virtchnl workqueue even if it is waiting + * for the response as we are going down + */ + for (i = 0; i < IDPF_VC_NBITS; i++) + set_bit(i, adapter->vc_state); + wake_up(&adapter->vchnl_wq); + + cancel_delayed_work_sync(&adapter->serv_task); + cancel_delayed_work_sync(&adapter->mbx_task); + + idpf_vport_params_buf_rel(adapter); + + /* Clear all the bits */ + for (i = 0; i < IDPF_VC_NBITS; i++) + clear_bit(i, adapter->vc_state); + + kfree(adapter->vports); + adapter->vports = NULL; +} + +/** + * idpf_vport_alloc_vec_indexes - Get relative vector indexes + * @vport: virtual port data struct + * + * This function requests the vector information required for the vport and + * stores the vector indexes received from the 'global vector distribution' + * in the vport's queue vectors array. 
+ * + * Return 0 on success, error on failure + */ +int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) +{ + struct idpf_vector_info vec_info; + int num_alloc_vecs; + + vec_info.num_curr_vecs = vport->num_q_vectors; + vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); + vec_info.default_vport = vport->default_vport; + vec_info.index = vport->idx; + + num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, + vport->q_vector_idxs, + &vec_info); + if (num_alloc_vecs <= 0) { + dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", + num_alloc_vecs); + return -EINVAL; + } + + vport->num_q_vectors = num_alloc_vecs; + + return 0; +} + +/** + * idpf_vport_init - Initialize virtual port + * @vport: virtual port to be initialized + * @max_q: vport max queue info + * + * Will initialize vport with the info received through MB earlier + */ +void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_msg; + struct idpf_vport_config *vport_config; + u16 tx_itr[] = {2, 8, 64, 128, 256}; + u16 rx_itr[] = {2, 8, 32, 96, 128}; + struct idpf_rss_data *rss_data; + u16 idx = vport->idx; + + vport_config = adapter->vport_config[idx]; + rss_data = &vport_config->user_config.rss_data; + vport_msg = adapter->vport_params_recvd[idx]; + + vport_config->max_q.max_txq = max_q->max_txq; + vport_config->max_q.max_rxq = max_q->max_rxq; + vport_config->max_q.max_complq = max_q->max_complq; + vport_config->max_q.max_bufq = max_q->max_bufq; + + vport->txq_model = le16_to_cpu(vport_msg->txq_model); + vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); + vport->vport_type = le16_to_cpu(vport_msg->vport_type); + vport->vport_id = le32_to_cpu(vport_msg->vport_id); + + rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, + le16_to_cpu(vport_msg->rss_key_size)); + rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); + + ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); + vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; + + /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ + memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); + memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + + idpf_vport_init_num_qs(vport, vport_msg); + idpf_vport_calc_num_q_desc(vport); + idpf_vport_calc_num_q_groups(vport); + idpf_vport_alloc_vec_indexes(vport); + + vport->crc_enable = adapter->crc_enable; +} + +/** + * idpf_get_vec_ids - Initialize vector id from Mailbox parameters + * @adapter: adapter structure to get the mailbox vector id + * @vecids: Array of vector ids + * @num_vecids: number of vector ids + * @chunks: vector ids received over mailbox + * + * Will initialize the mailbox vector id which is received from the + * get capabilities and data queue vector ids with ids received as + * mailbox parameters. 
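 *
 * A minimal illustration (editor's sketch, values invented): with a mailbox
 * vector of 0 and one chunk with start_vector_id 4 and num_vectors 3, a call
 * such as
 *
 *	num = idpf_get_vec_ids(adapter, vecids, num_vecids, chunks);
 *
 * fills vecids with { 0, 4, 5, 6 } and returns 4.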
+ * Returns number of ids filled + */ +int idpf_get_vec_ids(struct idpf_adapter *adapter, + u16 *vecids, int num_vecids, + struct virtchnl2_vector_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_vchunks); + int num_vecid_filled = 0; + int i, j; + + vecids[num_vecid_filled] = adapter->mb_vector.v_idx; + num_vecid_filled++; + + for (j = 0; j < num_chunks; j++) { + struct virtchnl2_vector_chunk *chunk; + u16 start_vecid, num_vec; + + chunk = &chunks->vchunks[j]; + num_vec = le16_to_cpu(chunk->num_vectors); + start_vecid = le16_to_cpu(chunk->start_vector_id); + + for (i = 0; i < num_vec; i++) { + if ((num_vecid_filled + i) < num_vecids) { + vecids[num_vecid_filled + i] = start_vecid; + start_vecid++; + } else { + break; + } + } + num_vecid_filled = num_vecid_filled + i; + } + + return num_vecid_filled; +} + +/** + * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters + * @qids: Array of queue ids + * @num_qids: number of queue ids + * @q_type: queue model + * @chunks: queue ids received over mailbox + * + * Will initialize all queue ids with ids received as mailbox parameters + * Returns number of ids filled + */ +static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, + struct virtchnl2_queue_reg_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_chunks); + u32 num_q_id_filled = 0, i; + u32 start_q_id, num_q; + + while (num_chunks--) { + struct virtchnl2_queue_reg_chunk *chunk; + + chunk = &chunks->chunks[num_chunks]; + if (le32_to_cpu(chunk->type) != q_type) + continue; + + num_q = le32_to_cpu(chunk->num_queues); + start_q_id = le32_to_cpu(chunk->start_queue_id); + + for (i = 0; i < num_q; i++) { + if ((num_q_id_filled + i) < num_qids) { + qids[num_q_id_filled + i] = start_q_id; + start_q_id++; + } else { + break; + } + } + num_q_id_filled = num_q_id_filled + i; + } + + return num_q_id_filled; +} + +/** + * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters + * @vport: virtual port for which the queues ids are initialized + * @qids: queue ids + * @num_qids: number of queue ids + * @q_type: type of queue + * + * Will initialize all queue ids with ids received as mailbox + * parameters. Returns number of queue ids initialized. 
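 *
 * As an editor's illustration with invented numbers: for q_type
 * VIRTCHNL2_QUEUE_TYPE_TX, qids[] = { 18, 19, 20, 21 } and num_qids 4, the
 * four ids are written to the vport's Tx queues in group order and 4 is
 * returned.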
+ */ +static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, + const u32 *qids, + int num_qids, + u32 q_type) +{ + struct idpf_queue *q; + int i, j, k = 0; + + switch (q_type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { + tx_qgrp->txqs[j]->q_id = qids[k]; + tx_qgrp->txqs[j]->q_type = + VIRTCHNL2_QUEUE_TYPE_TX; + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq && k < num_qids; j++, k++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->q_id = qids[k]; + q->q_type = VIRTCHNL2_QUEUE_TYPE_RX; + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: + for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + tx_qgrp->complq->q_id = qids[k]; + tx_qgrp->complq->q_type = + VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u8 num_bufqs = vport->num_bufqs_per_qgrp; + + for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->q_id = qids[k]; + q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; + } + } + break; + default: + break; + } + + return k; +} + +/** + * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters + * @vport: virtual port for which the queues ids are initialized + * + * Will initialize all queue ids with ids received as mailbox parameters. + * Returns 0 on success, negative if all the queues are not initialized. 
+ */ +int idpf_vport_queue_ids_init(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int num_ids, err = 0; + u16 q_type; + u32 *qids; + + vport_config = vport->adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = vport->adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL); + if (!qids) + return -ENOMEM; + + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, + VIRTCHNL2_QUEUE_TYPE_TX, + chunks); + if (num_ids < vport->num_txq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + VIRTCHNL2_QUEUE_TYPE_TX); + if (num_ids < vport->num_txq) { + err = -EINVAL; + goto mem_rel; + } + + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, + VIRTCHNL2_QUEUE_TYPE_RX, + chunks); + if (num_ids < vport->num_rxq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + VIRTCHNL2_QUEUE_TYPE_RX); + if (num_ids < vport->num_rxq) { + err = -EINVAL; + goto mem_rel; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + goto check_rxq; + + q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); + if (num_ids < vport->num_complq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); + if (num_ids < vport->num_complq) { + err = -EINVAL; + goto mem_rel; + } + +check_rxq: + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto mem_rel; + + q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); + if (num_ids < vport->num_bufq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); + if (num_ids < vport->num_bufq) + err = -EINVAL; + +mem_rel: + kfree(qids); + + return err; +} + +/** + * idpf_vport_adjust_qs - Adjust to new requested queues + * @vport: virtual port data struct + * + * Renegotiate queues. Returns 0 on success, negative on failure. 
+ */ +int idpf_vport_adjust_qs(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport vport_msg; + int err; + + vport_msg.txq_model = cpu_to_le16(vport->txq_model); + vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); + err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, + NULL); + if (err) + return err; + + idpf_vport_init_num_qs(vport, &vport_msg); + idpf_vport_calc_num_q_groups(vport); + + return 0; +} + +/** + * idpf_is_capability_ena - Default implementation of capability checking + * @adapter: Private data struct + * @all: all or one flag + * @field: caps field to check for flags + * @flag: flag to check + * + * Return true if all capabilities are supported, false otherwise + */ +bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, + enum idpf_cap_field field, u64 flag) +{ + u8 *caps = (u8 *)&adapter->caps; + u32 *cap_field; + + if (!caps) + return false; + + if (field == IDPF_BASE_CAPS) + return false; + + cap_field = (u32 *)(caps + field); + + if (all) + return (*cap_field & flag) == flag; + else + return !!(*cap_field & flag); +} + +/** + * idpf_get_vport_id: Get vport id + * @vport: virtual port structure + * + * Return vport id from the adapter persistent data + */ +u32 idpf_get_vport_id(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + + return le32_to_cpu(vport_msg->vport_id); +} + +/** + * idpf_add_del_mac_filters - Add/del mac filters + * @vport: Virtual port data structure + * @np: Netdev private structure + * @add: Add or delete flag + * @async: Don't wait for return message + * + * Returns 0 on success, error on failure. + **/ +int idpf_add_del_mac_filters(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + bool add, bool async) +{ + struct virtchnl2_mac_addr_list *ma_list = NULL; + struct idpf_adapter *adapter = np->adapter; + struct idpf_vport_config *vport_config; + enum idpf_vport_config_flags mac_flag; + struct pci_dev *pdev = adapter->pdev; + enum idpf_vport_vc_state vc, vc_err; + struct virtchnl2_mac_addr *mac_addr; + struct idpf_mac_filter *f, *tmp; + u32 num_msgs, total_filters = 0; + int i = 0, k, err = 0; + u32 vop; + + vport_config = adapter->vport_config[np->vport_idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + /* Find the number of newly added filters */ + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, + list) { + if (add && f->add) + total_filters++; + else if (!add && f->remove) + total_filters++; + } + + if (!total_filters) { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; + } + + /* Fill all the new filters into virtchannel message */ + mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), + GFP_ATOMIC); + if (!mac_addr) { + err = -ENOMEM; + spin_unlock_bh(&vport_config->mac_filter_list_lock); + goto error; + } + + list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list, + list) { + if (add && f->add) { + ether_addr_copy(mac_addr[i].addr, f->macaddr); + i++; + f->add = false; + if (i == total_filters) + break; + } + if (!add && f->remove) { + ether_addr_copy(mac_addr[i].addr, f->macaddr); + i++; + f->remove = false; + if (i == total_filters) + break; + } + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + if (add) { + vop = VIRTCHNL2_OP_ADD_MAC_ADDR; + vc = IDPF_VC_ADD_MAC_ADDR; + vc_err = IDPF_VC_ADD_MAC_ADDR_ERR; + mac_flag = IDPF_VPORT_ADD_MAC_REQ; + } else { + vop = VIRTCHNL2_OP_DEL_MAC_ADDR; + vc = 
IDPF_VC_DEL_MAC_ADDR; + vc_err = IDPF_VC_DEL_MAC_ADDR_ERR; + mac_flag = IDPF_VPORT_DEL_MAC_REQ; + } + + /* Chunk up the filters into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); + + if (!async) + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + u32 entries_size, buf_size, num_entries; + + num_entries = min_t(u32, total_filters, + IDPF_NUM_FILTERS_PER_MSG); + entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries; + buf_size = struct_size(ma_list, mac_addr_list, num_entries); + + if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { + kfree(ma_list); + ma_list = kzalloc(buf_size, GFP_ATOMIC); + if (!ma_list) { + err = -ENOMEM; + goto list_prep_error; + } + } else { + memset(ma_list, 0, buf_size); + } + + ma_list->vport_id = cpu_to_le32(np->vport_id); + ma_list->num_mac_addr = cpu_to_le16(num_entries); + memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); + + if (async) + set_bit(mac_flag, vport_config->flags); + + err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list); + if (err) + goto mbx_error; + + if (!async) { + err = idpf_wait_for_event(adapter, vport, vc, vc_err); + if (err) + goto mbx_error; + } + + k += num_entries; + total_filters -= num_entries; + } + +mbx_error: + if (!async) + mutex_unlock(&vport->vc_buf_lock); + kfree(ma_list); +list_prep_error: + kfree(mac_addr); +error: + if (err) + dev_err(&pdev->dev, "Failed to add or del mac filters %d", err); + + return err; +} + +/** + * idpf_set_promiscuous - set promiscuous and send message to mailbox + * @adapter: Driver specific private structure + * @config_data: Vport specific config data + * @vport_id: Vport identifier + * + * Request to enable promiscuous mode for the vport. Message is sent + * asynchronously and won't wait for response. Returns 0 on success, negative + * on failure; + */ +int idpf_set_promiscuous(struct idpf_adapter *adapter, + struct idpf_vport_user_config_data *config_data, + u32 vport_id) +{ + struct virtchnl2_promisc_info vpi; + u16 flags = 0; + int err; + + if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) + flags |= VIRTCHNL2_UNICAST_PROMISC; + if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) + flags |= VIRTCHNL2_MULTICAST_PROMISC; + + vpi.vport_id = cpu_to_le32(vport_id); + vpi.flags = cpu_to_le16(flags); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE, + sizeof(struct virtchnl2_promisc_info), + (u8 *)&vpi); + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h new file mode 100644 index 0000000000..4a3c4454d2 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -0,0 +1,1273 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _VIRTCHNL2_H_ +#define _VIRTCHNL2_H_ + +/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or + * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures, + * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion. + * + * PF/VF uses the virtchnl2 interface defined in this header file to communicate + * with device Control Plane (CP). Driver and the CP may run on different + * platforms with different endianness. To avoid byte order discrepancies, + * all the structures in this header follow little-endian format. 
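 *
 * For example (editor's note), a little-endian field is always written with
 * the cpu_to_le*() helpers and read back with le*_to_cpu(), as the idpf code
 * earlier in this patch does:
 *
 *	loopback.vport_id = cpu_to_le32(vport->vport_id);
 *	vport->vport_id = le32_to_cpu(vport_msg->vport_id);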
+ * + * This is an interface definition file where existing enums and their values + * must remain unchanged over time, so we specify explicit values for all enums. + */ + +#include "virtchnl2_lan_desc.h" + +/* This macro is used to generate compilation errors if a structure + * is not exactly the correct length. + */ +#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \ + static_assert((n) == sizeof(struct X)) + +/* New major set of opcodes introduced and so leaving room for + * old misc opcodes to be added in future. Also these opcodes may only + * be used if both the PF and VF have successfully negotiated the + * VIRTCHNL version as 2.0 during VIRTCHNL2_OP_VERSION exchange. + */ +enum virtchnl2_op { + VIRTCHNL2_OP_UNKNOWN = 0, + VIRTCHNL2_OP_VERSION = 1, + VIRTCHNL2_OP_GET_CAPS = 500, + VIRTCHNL2_OP_CREATE_VPORT = 501, + VIRTCHNL2_OP_DESTROY_VPORT = 502, + VIRTCHNL2_OP_ENABLE_VPORT = 503, + VIRTCHNL2_OP_DISABLE_VPORT = 504, + VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505, + VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506, + VIRTCHNL2_OP_ENABLE_QUEUES = 507, + VIRTCHNL2_OP_DISABLE_QUEUES = 508, + VIRTCHNL2_OP_ADD_QUEUES = 509, + VIRTCHNL2_OP_DEL_QUEUES = 510, + VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511, + VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512, + VIRTCHNL2_OP_GET_RSS_KEY = 513, + VIRTCHNL2_OP_SET_RSS_KEY = 514, + VIRTCHNL2_OP_GET_RSS_LUT = 515, + VIRTCHNL2_OP_SET_RSS_LUT = 516, + VIRTCHNL2_OP_GET_RSS_HASH = 517, + VIRTCHNL2_OP_SET_RSS_HASH = 518, + VIRTCHNL2_OP_SET_SRIOV_VFS = 519, + VIRTCHNL2_OP_ALLOC_VECTORS = 520, + VIRTCHNL2_OP_DEALLOC_VECTORS = 521, + VIRTCHNL2_OP_EVENT = 522, + VIRTCHNL2_OP_GET_STATS = 523, + VIRTCHNL2_OP_RESET_VF = 524, + VIRTCHNL2_OP_GET_EDT_CAPS = 525, + VIRTCHNL2_OP_GET_PTYPE_INFO = 526, + /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and + * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW. + * Opcodes 529, 530, 531, 532 and 533 are reserved. + */ + VIRTCHNL2_OP_LOOPBACK = 534, + VIRTCHNL2_OP_ADD_MAC_ADDR = 535, + VIRTCHNL2_OP_DEL_MAC_ADDR = 536, + VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537, +}; + +/** + * enum virtchnl2_vport_type - Type of virtual port. + * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type. + */ +enum virtchnl2_vport_type { + VIRTCHNL2_VPORT_TYPE_DEFAULT = 0, +}; + +/** + * enum virtchnl2_queue_model - Type of queue model. + * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model. + * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model. + * + * In the single queue model, the same transmit descriptor queue is used by + * software to post descriptors to hardware and by hardware to post completed + * descriptors to software. + * Likewise, the same receive descriptor queue is used by hardware to post + * completions to software and by software to post buffers to hardware. + * + * In the split queue model, hardware uses transmit completion queues to post + * descriptor/buffer completions to software, while software uses transmit + * descriptor queues to post descriptors to hardware. + * Likewise, hardware posts descriptor completions to the receive descriptor + * queue, while software uses receive buffer queues to post buffers to hardware. 
+ */ +enum virtchnl2_queue_model { + VIRTCHNL2_QUEUE_MODEL_SINGLE = 0, + VIRTCHNL2_QUEUE_MODEL_SPLIT = 1, +}; + +/* Checksum offload capability flags */ +enum virtchnl2_cap_txrx_csum { + VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6), + VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7), + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14), + VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15), + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16), + VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17), + VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18), + VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19), + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20), + VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21), + VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22), + VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23), +}; + +/* Segmentation offload capability flags */ +enum virtchnl2_cap_seg { + VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1), + VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2), + VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3), + VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4), + VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5), + VIRTCHNL2_CAP_SEG_GENERIC = BIT(6), + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7), + VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8), +}; + +/* Receive Side Scaling Flow type capability flags */ +enum virtchnl2_cap_rss { + VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1), + VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2), + VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3), + VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4), + VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5), + VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6), + VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7), + VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8), + VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9), + VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10), + VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11), + VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12), + VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13), +}; + +/* Header split capability flags */ +enum virtchnl2_cap_rx_hsplit_at { + /* for prepended metadata */ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0), + /* all VLANs go into header buffer */ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1), + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2), + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3), +}; + +/* Receive Side Coalescing offload capability flags */ +enum virtchnl2_cap_rsc { + VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1), + VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2), + VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3), +}; + +/* Other capability flags */ +enum virtchnl2_cap_other { + VIRTCHNL2_CAP_RDMA = BIT_ULL(0), + VIRTCHNL2_CAP_SRIOV = BIT_ULL(1), + VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2), + VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3), + /* Queue based scheduling using split queue model */ + VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4), + VIRTCHNL2_CAP_CRC = BIT_ULL(5), + VIRTCHNL2_CAP_ADQ = BIT_ULL(6), + VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7), + VIRTCHNL2_CAP_PROMISC = BIT_ULL(8), + VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9), + VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10), + 
VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11), + VIRTCHNL2_CAP_VLAN = BIT_ULL(12), + VIRTCHNL2_CAP_PTP = BIT_ULL(13), + /* EDT: Earliest Departure Time capability used for Timing Wheel */ + VIRTCHNL2_CAP_EDT = BIT_ULL(14), + VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15), + VIRTCHNL2_CAP_FDIR = BIT_ULL(16), + VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17), + VIRTCHNL2_CAP_PTYPE = BIT_ULL(18), + VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19), + /* Other capability 20 is reserved */ + + /* this must be the last capability */ + VIRTCHNL2_CAP_OEM = BIT_ULL(63), +}; + +/* underlying device type */ +enum virtchl2_device_type { + VIRTCHNL2_MEV_DEVICE = 0, +}; + +/** + * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes. + * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder + * completions where descriptors and buffers + * are completed at the same time. + * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order + * packet processing where descriptors are + * cleaned in order, but buffers can be + * completed out of order. + */ +enum virtchnl2_txq_sched_mode { + VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0, + VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1, +}; + +/** + * enum virtchnl2_rxq_flags - Receive Queue Feature flags. + * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag. + * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag. + * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed + * by hardware immediately after processing + * each packet. + * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size. + * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size. + */ +enum virtchnl2_rxq_flags { + VIRTCHNL2_RXQ_RSC = BIT(0), + VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1), + VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2), + VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3), + VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4), +}; + +/* Type of RSS algorithm */ +enum virtchnl2_rss_alg { + VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1, + VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3, +}; + +/* Type of event */ +enum virtchnl2_event_codes { + VIRTCHNL2_EVENT_UNKNOWN = 0, + VIRTCHNL2_EVENT_LINK_CHANGE = 1, + /* Event type 2, 3 are reserved */ +}; + +/* Transmit and Receive queue types are valid in legacy as well as split queue + * models. With Split Queue model, 2 additional types are introduced - + * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to + * the queue where hardware posts completions. + */ +enum virtchnl2_queue_type { + VIRTCHNL2_QUEUE_TYPE_TX = 0, + VIRTCHNL2_QUEUE_TYPE_RX = 1, + VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3, + VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4, + VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5, + /* Queue types 6, 7, 8, 9 are reserved */ + VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10, + VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11, +}; + +/* Interrupt throttling rate index */ +enum virtchnl2_itr_idx { + VIRTCHNL2_ITR_IDX_0 = 0, + VIRTCHNL2_ITR_IDX_1 = 1, +}; + +/** + * enum virtchnl2_mac_addr_type - MAC address types. + * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the + * primary/device unicast MAC address filter for + * VIRTCHNL2_OP_ADD_MAC_ADDR and + * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the + * underlying control plane function to accurately + * track the MAC address and for VM/function reset. 
+ * + * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra + * unicast and/or multicast filters that are being + * added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or + * VIRTCHNL2_OP_DEL_MAC_ADDR. + */ +enum virtchnl2_mac_addr_type { + VIRTCHNL2_MAC_ADDR_PRIMARY = 1, + VIRTCHNL2_MAC_ADDR_EXTRA = 2, +}; + +/* Flags used for promiscuous mode */ +enum virtchnl2_promisc_flags { + VIRTCHNL2_UNICAST_PROMISC = BIT(0), + VIRTCHNL2_MULTICAST_PROMISC = BIT(1), +}; + +/* Protocol header type within a packet segment. A segment consists of one or + * more protocol headers that make up a logical group of protocol headers. Each + * logical group of protocol headers encapsulates or is encapsulated using/by + * tunneling or encapsulation protocols for network virtualization. + */ +enum virtchnl2_proto_hdr_type { + /* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ANY = 0, + VIRTCHNL2_PROTO_HDR_PRE_MAC = 1, + /* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_MAC = 2, + VIRTCHNL2_PROTO_HDR_POST_MAC = 3, + VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4, + VIRTCHNL2_PROTO_HDR_VLAN = 5, + VIRTCHNL2_PROTO_HDR_SVLAN = 6, + VIRTCHNL2_PROTO_HDR_CVLAN = 7, + VIRTCHNL2_PROTO_HDR_MPLS = 8, + VIRTCHNL2_PROTO_HDR_UMPLS = 9, + VIRTCHNL2_PROTO_HDR_MMPLS = 10, + VIRTCHNL2_PROTO_HDR_PTP = 11, + VIRTCHNL2_PROTO_HDR_CTRL = 12, + VIRTCHNL2_PROTO_HDR_LLDP = 13, + VIRTCHNL2_PROTO_HDR_ARP = 14, + VIRTCHNL2_PROTO_HDR_ECP = 15, + VIRTCHNL2_PROTO_HDR_EAPOL = 16, + VIRTCHNL2_PROTO_HDR_PPPOD = 17, + VIRTCHNL2_PROTO_HDR_PPPOE = 18, + /* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV4 = 19, + /* IPv4 and IPv6 Fragment header types are only associated to + * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively, + * cannot be used independently. 
+ */ + /* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20, + /* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV6 = 21, + /* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22, + VIRTCHNL2_PROTO_HDR_IPV6_EH = 23, + /* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_UDP = 24, + /* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_TCP = 25, + /* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_SCTP = 26, + /* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ICMP = 27, + /* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ICMPV6 = 28, + VIRTCHNL2_PROTO_HDR_IGMP = 29, + VIRTCHNL2_PROTO_HDR_AH = 30, + VIRTCHNL2_PROTO_HDR_ESP = 31, + VIRTCHNL2_PROTO_HDR_IKE = 32, + VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33, + /* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_PAY = 34, + VIRTCHNL2_PROTO_HDR_L2TPV2 = 35, + VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36, + VIRTCHNL2_PROTO_HDR_L2TPV3 = 37, + VIRTCHNL2_PROTO_HDR_GTP = 38, + VIRTCHNL2_PROTO_HDR_GTP_EH = 39, + VIRTCHNL2_PROTO_HDR_GTPCV2 = 40, + VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41, + VIRTCHNL2_PROTO_HDR_GTPU = 42, + VIRTCHNL2_PROTO_HDR_GTPU_UL = 43, + VIRTCHNL2_PROTO_HDR_GTPU_DL = 44, + VIRTCHNL2_PROTO_HDR_ECPRI = 45, + VIRTCHNL2_PROTO_HDR_VRRP = 46, + VIRTCHNL2_PROTO_HDR_OSPF = 47, + /* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_TUN = 48, + VIRTCHNL2_PROTO_HDR_GRE = 49, + VIRTCHNL2_PROTO_HDR_NVGRE = 50, + VIRTCHNL2_PROTO_HDR_VXLAN = 51, + VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52, + VIRTCHNL2_PROTO_HDR_GENEVE = 53, + VIRTCHNL2_PROTO_HDR_NSH = 54, + VIRTCHNL2_PROTO_HDR_QUIC = 55, + VIRTCHNL2_PROTO_HDR_PFCP = 56, + VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57, + VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58, + VIRTCHNL2_PROTO_HDR_RTP = 59, + VIRTCHNL2_PROTO_HDR_ROCE = 60, + VIRTCHNL2_PROTO_HDR_ROCEV1 = 61, + VIRTCHNL2_PROTO_HDR_ROCEV2 = 62, + /* Protocol ids up to 32767 are reserved. + * 32768 - 65534 are used for user defined protocol ids. + * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id. + */ + VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535, +}; + +enum virtchl2_version { + VIRTCHNL2_VERSION_MINOR_0 = 0, + VIRTCHNL2_VERSION_MAJOR_2 = 2, +}; + +/** + * struct virtchnl2_edt_caps - Get EDT granularity and time horizon. + * @tstamp_granularity_ns: Timestamp granularity in nanoseconds. + * @time_horizon_ns: Total time window in nanoseconds. + * + * Associated with VIRTCHNL2_OP_GET_EDT_CAPS. + */ +struct virtchnl2_edt_caps { + __le64 tstamp_granularity_ns; + __le64 time_horizon_ns; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps); + +/** + * struct virtchnl2_version_info - Version information. + * @major: Major version. + * @minor: Minor version. + * + * PF/VF posts its version number to the CP. CP responds with its version number + * in the same format, along with a return code. + * If there is a major version mismatch, then the PF/VF cannot operate. + * If there is a minor version mismatch, then the PF/VF can operate but should + * add a warning to the system log. + * + * This version opcode MUST always be specified as == 1, regardless of other + * changes in the API. The CP must always respond to this message without + * error regardless of version mismatch. + * + * Associated with VIRTCHNL2_OP_VERSION. 
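 *
 * As a hedged illustration (editor's sketch), a driver built against this
 * header would advertise the 2.0 protocol defined by enum virtchl2_version:
 *
 *	struct virtchnl2_version_info ver = {
 *		.major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2),
 *		.minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0),
 *	};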
+ */
+struct virtchnl2_version_info {
+	__le32 major;
+	__le32 minor;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
+
+/**
+ * struct virtchnl2_get_capabilities - Capabilities info.
+ * @csum_caps: See enum virtchnl2_cap_txrx_csum.
+ * @seg_caps: See enum virtchnl2_cap_seg.
+ * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
+ * @rsc_caps: See enum virtchnl2_cap_rsc.
+ * @rss_caps: See enum virtchnl2_cap_rss.
+ * @other_caps: See enum virtchnl2_cap_other.
+ * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
+ *		     provided by CP.
+ * @mailbox_vector_id: Mailbox vector id.
+ * @num_allocated_vectors: Maximum number of allocated vectors for the device.
+ * @max_rx_q: Maximum number of supported Rx queues.
+ * @max_tx_q: Maximum number of supported Tx queues.
+ * @max_rx_bufq: Maximum number of supported buffer queues.
+ * @max_tx_complq: Maximum number of supported completion queues.
+ * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
+ *		   responds with the maximum VFs granted.
+ * @max_vports: Maximum number of vports that can be supported.
+ * @default_num_vports: Default number of vports driver should allocate on load.
+ * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
+ * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
+ *			    sent per transmit packet without needing to be
+ *			    linearized.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ * @device_type: See enum virtchl2_device_type.
+ * @min_sso_packet_len: Min packet length supported by device for single
+ *			segment offload.
+ * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
+ *			 an LSO.
+ * @pad1: Padding for future extensions.
+ *
+ * Dataplane driver sends this message to CP to negotiate capabilities and
+ * provides a virtchnl2_get_capabilities structure with its desired
+ * capabilities, max_sriov_vfs and num_allocated_vectors.
+ * CP responds with a virtchnl2_get_capabilities structure updated
+ * with allowed capabilities and the other fields as below.
+ * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
+ * that can be created by this PF. For any other value 'n', CP responds
+ * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
+ * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
+ * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1
+ * which is default vector associated with the default mailbox. For any other
+ * value 'n', CP responds with a value <= n based on the CP's policy of
+ * max number of vectors for a PF.
+ * CP will respond with the vector ID of mailbox allocated to the PF in
+ * mailbox_vector_id and the number of itr index registers in itr_idx_map.
+ * It also responds with the default number of vports that the dataplane driver
+ * should come up with in default_num_vports and the maximum number of vports
+ * that can be supported in max_vports.
+ *
+ * Associated with VIRTCHNL2_OP_GET_CAPS.
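 *
 * A minimal request sketch (editor's example, not mandated by the spec): a
 * function that is happy with the CP defaults can leave max_sriov_vfs and
 * num_allocated_vectors at 0 and only advertise the offloads it implements:
 *
 *	struct virtchnl2_get_capabilities caps = { };
 *
 *	caps.csum_caps = cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
 *				     VIRTCHNL2_CAP_RX_CSUM_L3_IPV4);
 *	caps.other_caps = cpu_to_le64(VIRTCHNL2_CAP_MACFILTER);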
+ */ +struct virtchnl2_get_capabilities { + __le32 csum_caps; + __le32 seg_caps; + __le32 hsplit_caps; + __le32 rsc_caps; + __le64 rss_caps; + __le64 other_caps; + __le32 mailbox_dyn_ctl; + __le16 mailbox_vector_id; + __le16 num_allocated_vectors; + __le16 max_rx_q; + __le16 max_tx_q; + __le16 max_rx_bufq; + __le16 max_tx_complq; + __le16 max_sriov_vfs; + __le16 max_vports; + __le16 default_num_vports; + __le16 max_tx_hdr_size; + u8 max_sg_bufs_per_tx_pkt; + u8 pad[3]; + u8 reserved[4]; + __le32 device_type; + u8 min_sso_packet_len; + u8 max_hdr_buf_per_lso; + u8 pad1[10]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities); + +/** + * struct virtchnl2_queue_reg_chunk - Single queue chunk. + * @type: See enum virtchnl2_queue_type. + * @start_queue_id: Start Queue ID. + * @num_queues: Number of queues in the chunk. + * @pad: Padding. + * @qtail_reg_start: Queue tail register offset. + * @qtail_reg_spacing: Queue tail register spacing. + * @pad1: Padding for future extensions. + */ +struct virtchnl2_queue_reg_chunk { + __le32 type; + __le32 start_queue_id; + __le32 num_queues; + __le32 pad; + __le64 qtail_reg_start; + __le32 qtail_reg_spacing; + u8 pad1[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk); + +/** + * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous + * queues. + * @num_chunks: Number of chunks. + * @pad: Padding. + * @chunks: Chunks of queue info. + */ +struct virtchnl2_queue_reg_chunks { + __le16 num_chunks; + u8 pad[6]; + struct virtchnl2_queue_reg_chunk chunks[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); + +/** + * struct virtchnl2_create_vport - Create vport config info. + * @vport_type: See enum virtchnl2_vport_type. + * @txq_model: See virtchnl2_queue_model. + * @rxq_model: See virtchnl2_queue_model. + * @num_tx_q: Number of Tx queues. + * @num_tx_complq: Valid only if txq_model is split queue. + * @num_rx_q: Number of Rx queues. + * @num_rx_bufq: Valid only if rxq_model is split queue. + * @default_rx_q: Relative receive queue index to be used as default. + * @vport_index: Used to align PF and CP in case of default multiple vports, + * it is filled by the PF and CP returns the same value, to + * enable the driver to support multiple asynchronous parallel + * CREATE_VPORT requests and associate a response to a specific + * request. + * @max_mtu: Max MTU. CP populates this field on response. + * @vport_id: Vport id. CP populates this field on response. + * @default_mac_addr: Default MAC address. + * @pad: Padding. + * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. + * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. + * @pad1: Padding. + * @rss_algorithm: RSS algorithm. + * @rss_key_size: RSS key size. + * @rss_lut_size: RSS LUT size. + * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at. + * @pad2: Padding. + * @chunks: Chunks of contiguous queues. + * + * PF sends this message to CP to create a vport by filling in required + * fields of virtchnl2_create_vport structure. + * CP responds with the updated virtchnl2_create_vport structure containing the + * necessary fields followed by chunks which in turn will have an array of + * num_chunks entries of virtchnl2_queue_chunk structures. + * + * Associated with VIRTCHNL2_OP_CREATE_VPORT. 
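 *
 * An illustrative single-queue-model request (editor's sketch, counts
 * invented) could look like:
 *
 *	struct virtchnl2_create_vport vc = { };
 *
 *	vc.vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
 *	vc.txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
 *	vc.rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
 *	vc.num_tx_q = cpu_to_le16(4);
 *	vc.num_rx_q = cpu_to_le16(4);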
+ */ +struct virtchnl2_create_vport { + __le16 vport_type; + __le16 txq_model; + __le16 rxq_model; + __le16 num_tx_q; + __le16 num_tx_complq; + __le16 num_rx_q; + __le16 num_rx_bufq; + __le16 default_rx_q; + __le16 vport_index; + /* CP populates the following fields on response */ + __le16 max_mtu; + __le32 vport_id; + u8 default_mac_addr[ETH_ALEN]; + __le16 pad; + __le64 rx_desc_ids; + __le64 tx_desc_ids; + u8 pad1[72]; + __le32 rss_algorithm; + __le16 rss_key_size; + __le16 rss_lut_size; + __le32 rx_split_pos; + u8 pad2[20]; + struct virtchnl2_queue_reg_chunks chunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport); + +/** + * struct virtchnl2_vport - Vport ID info. + * @vport_id: Vport id. + * @pad: Padding for future extensions. + * + * PF sends this message to CP to destroy, enable or disable a vport by filling + * in the vport_id in virtchnl2_vport structure. + * CP responds with the status of the requested operation. + * + * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT, + * VIRTCHNL2_OP_DISABLE_VPORT. + */ +struct virtchnl2_vport { + __le32 vport_id; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport); + +/** + * struct virtchnl2_txq_info - Transmit queue config info + * @dma_ring_addr: DMA address. + * @type: See enum virtchnl2_queue_type. + * @queue_id: Queue ID. + * @relative_queue_id: Valid only if queue model is split and type is transmit + * queue. Used in many to one mapping of transmit queues to + * completion queue. + * @model: See enum virtchnl2_queue_model. + * @sched_mode: See enum virtchnl2_txq_sched_mode. + * @qflags: TX queue feature flags. + * @ring_len: Ring length. + * @tx_compl_queue_id: Valid only if queue model is split and type is transmit + * queue. + * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX + * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver + * messages for the respective CONFIG_TX queue. + * @pad: Padding. + * @egress_pasid: Egress PASID info. + * @egress_hdr_pasid: Egress HDR passid. + * @egress_buf_pasid: Egress buf passid. + * @pad1: Padding for future extensions. + */ +struct virtchnl2_txq_info { + __le64 dma_ring_addr; + __le32 type; + __le32 queue_id; + __le16 relative_queue_id; + __le16 model; + __le16 sched_mode; + __le16 qflags; + __le16 ring_len; + __le16 tx_compl_queue_id; + __le16 peer_type; + __le16 peer_rx_queue_id; + u8 pad[4]; + __le32 egress_pasid; + __le32 egress_hdr_pasid; + __le32 egress_buf_pasid; + u8 pad1[8]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info); + +/** + * struct virtchnl2_config_tx_queues - TX queue config. + * @vport_id: Vport id. + * @num_qinfo: Number of virtchnl2_txq_info structs. + * @pad: Padding. + * @qinfo: Tx queues config info. + * + * PF sends this message to set up parameters for one or more transmit queues. + * This message contains an array of num_qinfo instances of virtchnl2_txq_info + * structures. CP configures requested queues and returns a status code. If + * num_qinfo specified is greater than the number of queues associated with the + * vport, an error is returned and no queues are configured. + * + * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES. + */ +struct virtchnl2_config_tx_queues { + __le32 vport_id; + __le16 num_qinfo; + u8 pad[10]; + struct virtchnl2_txq_info qinfo[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues); + +/** + * struct virtchnl2_rxq_info - Receive queue config info. + * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. 
+ * @dma_ring_addr: DMA address.
+ * @type: See enum virtchnl2_queue_type.
+ * @queue_id: Queue id.
+ * @model: See enum virtchnl2_queue_model.
+ * @hdr_buffer_size: Header buffer size.
+ * @data_buffer_size: Data buffer size.
+ * @max_pkt_size: Max packet size.
+ * @ring_len: Ring length.
+ * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
+ *			 This field must be a power of 2.
+ * @pad: Padding.
+ * @dma_head_wb_addr: Applicable only for receive buffer queues.
+ * @qflags: Applicable only for receive completion queues.
+ *	    See enum virtchnl2_rxq_flags.
+ * @rx_buffer_low_watermark: Rx buffer low watermark.
+ * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
+ *		 the Rx queue. Valid only in split queue model.
+ * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
+ *		 the Rx queue. Valid only in split queue model.
+ * @bufq2_ena: Indicates that a second buffer queue is present; rx_bufq2_id is
+ *	       valid only if this field is set.
+ * @pad1: Padding.
+ * @ingress_pasid: Ingress PASID.
+ * @ingress_hdr_pasid: Ingress PASID header.
+ * @ingress_buf_pasid: Ingress PASID buffer.
+ * @pad2: Padding for future extensions.
+ */
+struct virtchnl2_rxq_info {
+	__le64 desc_ids;
+	__le64 dma_ring_addr;
+	__le32 type;
+	__le32 queue_id;
+	__le16 model;
+	__le16 hdr_buffer_size;
+	__le32 data_buffer_size;
+	__le32 max_pkt_size;
+	__le16 ring_len;
+	u8 buffer_notif_stride;
+	u8 pad;
+	__le64 dma_head_wb_addr;
+	__le16 qflags;
+	__le16 rx_buffer_low_watermark;
+	__le16 rx_bufq1_id;
+	__le16 rx_bufq2_id;
+	u8 bufq2_ena;
+	u8 pad1[3];
+	__le32 ingress_pasid;
+	__le32 ingress_hdr_pasid;
+	__le32 ingress_buf_pasid;
+	u8 pad2[16];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
+
+/**
+ * struct virtchnl2_config_rx_queues - Rx queues config.
+ * @vport_id: Vport id.
+ * @num_qinfo: Number of instances.
+ * @pad: Padding.
+ * @qinfo: Rx queues config info.
+ *
+ * PF sends this message to set up parameters for one or more receive queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
+ * structures. CP configures requested queues and returns a status code.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
+ */
+struct virtchnl2_config_rx_queues {
+	__le32 vport_id;
+	__le16 num_qinfo;
+	u8 pad[18];
+	struct virtchnl2_rxq_info qinfo[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
+
+/**
+ * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
+ * @vport_id: Vport id.
+ * @num_tx_q: Number of Tx queues.
+ * @num_tx_complq: Number of Tx completion queues.
+ * @num_rx_q: Number of Rx queues.
+ * @num_rx_bufq: Number of Rx buffer queues.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues.
+ *
+ * PF sends this message to request additional transmit/receive queues beyond
+ * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
+ * structure is used to specify the number of each type of queues.
+ * CP responds with the same structure with the actual number of queues assigned
+ * followed by num_chunks of virtchnl2_queue_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_QUEUES.
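 *
 * Editor's illustration (counts invented): asking for two more Tx/Rx queue
 * pairs on an existing vport, with the split-queue counts left at zero, might
 * look like:
 *
 *	struct virtchnl2_add_queues aq = { };
 *
 *	aq.vport_id = cpu_to_le32(vport_id);
 *	aq.num_tx_q = cpu_to_le16(2);
 *	aq.num_rx_q = cpu_to_le16(2);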
+ */ +struct virtchnl2_add_queues { + __le32 vport_id; + __le16 num_tx_q; + __le16 num_tx_complq; + __le16 num_rx_q; + __le16 num_rx_bufq; + u8 pad[4]; + struct virtchnl2_queue_reg_chunks chunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues); + +/** + * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous + * interrupt vectors. + * @start_vector_id: Start vector id. + * @start_evv_id: Start EVV id. + * @num_vectors: Number of vectors. + * @pad: Padding. + * @dynctl_reg_start: DYN_CTL register offset. + * @dynctl_reg_spacing: register spacing between DYN_CTL registers of 2 + * consecutive vectors. + * @itrn_reg_start: ITRN register offset. + * @itrn_reg_spacing: Register spacing between dynctl registers of 2 + * consecutive vectors. + * @itrn_index_spacing: Register spacing between itrn registers of the same + * vector where n=0..2. + * @pad1: Padding for future extensions. + * + * Register offsets and spacing provided by CP. + * Dynamic control registers are used for enabling/disabling/re-enabling + * interrupts and updating interrupt rates in the hotpath. Any changes + * to interrupt rates in the dynamic control registers will be reflected + * in the interrupt throttling rate registers. + * itrn registers are used to update interrupt rates for specific + * interrupt indices without modifying the state of the interrupt. + */ +struct virtchnl2_vector_chunk { + __le16 start_vector_id; + __le16 start_evv_id; + __le16 num_vectors; + __le16 pad; + __le32 dynctl_reg_start; + __le32 dynctl_reg_spacing; + __le32 itrn_reg_start; + __le32 itrn_reg_spacing; + __le32 itrn_index_spacing; + u8 pad1[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk); + +/** + * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors. + * @num_vchunks: number of vector chunks. + * @pad: Padding. + * @vchunks: Chunks of contiguous vector info. + * + * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving + * away. CP performs requested action and returns status. + * + * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS. + */ +struct virtchnl2_vector_chunks { + __le16 num_vchunks; + u8 pad[14]; + struct virtchnl2_vector_chunk vchunks[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks); + +/** + * struct virtchnl2_alloc_vectors - vector allocation info. + * @num_vectors: Number of vectors. + * @pad: Padding. + * @vchunks: Chunks of contiguous vector info. + * + * PF sends this message to request additional interrupt vectors beyond the + * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors + * structure is used to specify the number of vectors requested. CP responds + * with the same structure with the actual number of vectors assigned followed + * by virtchnl2_vector_chunks structure identifying the vector ids. + * + * Associated with VIRTCHNL2_OP_ALLOC_VECTORS. + */ +struct virtchnl2_alloc_vectors { + __le16 num_vectors; + u8 pad[14]; + struct virtchnl2_vector_chunks vchunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors); + +/** + * struct virtchnl2_rss_lut - RSS LUT info. + * @vport_id: Vport id. + * @lut_entries_start: Start of LUT entries. + * @lut_entries: Number of LUT entrties. + * @pad: Padding. + * @lut: RSS lookup table. + * + * PF sends this message to get or set RSS lookup table. Only supported if + * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration + * negotiation. + * + * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT. 
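 *
 * Editor's sketch of a SET request spreading traffic over num_rxq queues
 * (allocation and error handling omitted; lut_entries_start is assumed to be
 * the first LUT index being written):
 *
 *	rl = kzalloc(struct_size(rl, lut, lut_entries), GFP_KERNEL);
 *	rl->vport_id = cpu_to_le32(vport_id);
 *	rl->lut_entries = cpu_to_le16(lut_entries);
 *	for (i = 0; i < lut_entries; i++)
 *		rl->lut[i] = cpu_to_le32(i % num_rxq);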
+ */ +struct virtchnl2_rss_lut { + __le32 vport_id; + __le16 lut_entries_start; + __le16 lut_entries; + u8 pad[4]; + __le32 lut[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut); + +/** + * struct virtchnl2_rss_hash - RSS hash info. + * @ptype_groups: Packet type groups bitmap. + * @vport_id: Vport id. + * @pad: Padding for future extensions. + * + * PF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the CP sets these to all possible traffic types that the + * hardware supports. The PF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit + * during configuration negotiation. + * + * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH + */ +struct virtchnl2_rss_hash { + __le64 ptype_groups; + __le32 vport_id; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash); + +/** + * struct virtchnl2_sriov_vfs_info - VFs info. + * @num_vfs: Number of VFs. + * @pad: Padding for future extensions. + * + * This message is used to set number of SRIOV VFs to be created. The actual + * allocation of resources for the VFs in terms of vport, queues and interrupts + * is done by CP. When this call completes, the IDPF driver calls + * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices. + * The number of VFs set to 0 will destroy all the VFs of this function. + * + * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS. + */ +struct virtchnl2_sriov_vfs_info { + __le16 num_vfs; + __le16 pad; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info); + +/** + * struct virtchnl2_ptype - Packet type info. + * @ptype_id_10: 10-bit packet type. + * @ptype_id_8: 8-bit packet type. + * @proto_id_count: Number of protocol ids the packet supports, maximum of 32 + * protocol ids are supported. + * @pad: Padding. + * @proto_id: proto_id_count decides the allocation of protocol id array. + * See enum virtchnl2_proto_hdr_type. + * + * Based on the descriptor type the PF supports, CP fills ptype_id_10 or + * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value + * is set to 0xFFFF, PF should consider this ptype as dummy one and it is the + * last ptype. + */ +struct virtchnl2_ptype { + __le16 ptype_id_10; + u8 ptype_id_8; + u8 proto_id_count; + __le16 pad; + __le16 proto_id[]; +} __packed __aligned(2); +VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype); + +/** + * struct virtchnl2_get_ptype_info - Packet type info. + * @start_ptype_id: Starting ptype ID. + * @num_ptypes: Number of packet types from start_ptype_id. + * @pad: Padding for future extensions. + * + * The total number of supported packet types is based on the descriptor type. + * For the flex descriptor, it is 1024 (10-bit ptype), and for the base + * descriptor, it is 256 (8-bit ptype). Send this message to the CP by + * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the + * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that + * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There + * is no specific field for the ptypes but are added at the end of the + * ptype info message. PF/VF is expected to extract the ptypes accordingly. + * Reason for doing this is because compiler doesn't allow nested flexible + * array fields). 
+ *
+ * If all the ptypes don't fit into one mailbox buffer, CP splits the
+ * ptype info into multiple messages, where each message will have its own
+ * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
+ * updating all the ptype information extracted from the package (the number of
+ * ptypes extracted might be less than what PF/VF expects), it will append a
+ * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
+ * to the ptype array.
+ *
+ * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
+ */
+struct virtchnl2_get_ptype_info {
+	__le16 start_ptype_id;
+	__le16 num_ptypes;
+	__le32 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
+
+/**
+ * struct virtchnl2_vport_stats - Vport statistics.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @rx_bytes: Received bytes.
+ * @rx_unicast: Received unicast packets.
+ * @rx_multicast: Received multicast packets.
+ * @rx_broadcast: Received broadcast packets.
+ * @rx_discards: Discarded packets on receive.
+ * @rx_errors: Receive errors.
+ * @rx_unknown_protocol: Unknown protocol.
+ * @tx_bytes: Transmitted bytes.
+ * @tx_unicast: Transmitted unicast packets.
+ * @tx_multicast: Transmitted multicast packets.
+ * @tx_broadcast: Transmitted broadcast packets.
+ * @tx_discards: Discarded packets on transmit.
+ * @tx_errors: Transmit errors.
+ * @rx_invalid_frame_length: Packets with invalid frame length.
+ * @rx_overflow_drop: Packets dropped on buffer overflow.
+ *
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
+ *
+ * Associated with VIRTCHNL2_OP_GET_STATS.
+ */
+struct virtchnl2_vport_stats {
+	__le32 vport_id;
+	u8 pad[4];
+	__le64 rx_bytes;
+	__le64 rx_unicast;
+	__le64 rx_multicast;
+	__le64 rx_broadcast;
+	__le64 rx_discards;
+	__le64 rx_errors;
+	__le64 rx_unknown_protocol;
+	__le64 tx_bytes;
+	__le64 tx_unicast;
+	__le64 tx_multicast;
+	__le64 tx_broadcast;
+	__le64 tx_discards;
+	__le64 tx_errors;
+	__le64 rx_invalid_frame_length;
+	__le64 rx_overflow_drop;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
+
+/**
+ * struct virtchnl2_event - Event info.
+ * @event: Event opcode. See enum virtchnl2_event_codes.
+ * @link_speed: Link speed provided in Mbps.
+ * @vport_id: Vport ID.
+ * @link_status: Link status.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ *
+ * CP sends this message to inform the PF/VF driver of events that may affect
+ * it. No direct response is expected from the driver, though it may generate
+ * other messages in response to this one.
+ *
+ * Associated with VIRTCHNL2_OP_EVENT.
+ */
+struct virtchnl2_event {
+	__le32 event;
+	__le32 link_speed;
+	__le32 vport_id;
+	u8 link_status;
+	u8 pad;
+	__le16 reserved;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
+
+/**
+ * struct virtchnl2_rss_key - RSS key info.
+ * @vport_id: Vport id.
+ * @key_len: Length of RSS key.
+ * @pad: Padding.
+ * @key_flex: RSS hash key, packed bytes.
+ *
+ * PF/VF sends this message to get or set RSS key. Only supported if both
+ * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
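 *
 * Editor's sketch of a SET request (allocation and error handling omitted;
 * the key bytes are whatever the driver generated, e.g. via
 * netdev_rss_key_fill()):
 *
 *	rk = kzalloc(struct_size(rk, key_flex, key_len), GFP_KERNEL);
 *	rk->vport_id = cpu_to_le32(vport_id);
 *	rk->key_len = cpu_to_le16(key_len);
 *	memcpy(rk->key_flex, key, key_len);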
+ */
+struct virtchnl2_rss_key {
+ __le32 vport_id;
+ __le16 key_len;
+ u8 pad;
+ u8 key_flex[];
+} __packed;
+VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
+
+/**
+ * struct virtchnl2_queue_chunk - chunk of contiguous queues
+ * @type: See enum virtchnl2_queue_type.
+ * @start_queue_id: Starting queue id.
+ * @num_queues: Number of queues.
+ * @pad: Padding for future extensions.
+ */
+struct virtchnl2_queue_chunk {
+ __le32 type;
+ __le32 start_queue_id;
+ __le32 num_queues;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
+
+/* struct virtchnl2_queue_chunks - chunks of contiguous queues
+ * @num_chunks: Number of chunks.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ */
+struct virtchnl2_queue_chunks {
+ __le16 num_chunks;
+ u8 pad[6];
+ struct virtchnl2_queue_chunk chunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
+
+/**
+ * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ *
+ * PF sends these messages to enable, disable or delete queues specified in
+ * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
+ * to be enabled/disabled/deleted. Also applicable to single queue receive or
+ * transmit. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
+ * VIRTCHNL2_OP_DEL_QUEUES.
+ */
+struct virtchnl2_del_ena_dis_queues {
+ __le32 vport_id;
+ u8 pad[4];
+ struct virtchnl2_queue_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
+
+/**
+ * struct virtchnl2_queue_vector - Queue to vector mapping.
+ * @queue_id: Queue id.
+ * @vector_id: Vector id.
+ * @pad: Padding.
+ * @itr_idx: See enum virtchnl2_itr_idx.
+ * @queue_type: See enum virtchnl2_queue_type.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_queue_vector {
+ __le32 queue_id;
+ __le16 vector_id;
+ u8 pad[2];
+ __le32 itr_idx;
+ __le32 queue_type;
+ u8 pad1[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
+
+/**
+ * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
+ * @vport_id: Vport id.
+ * @num_qv_maps: Number of queue vector maps.
+ * @pad: Padding.
+ * @qv_maps: Queue to vector maps.
+ *
+ * PF sends this message to map or unmap queues to vectors and interrupt
+ * throttling rate index registers. External data buffer contains
+ * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
+ * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
+ * after validating the queue and vector ids and returns a status code.
+ *
+ * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
+ */
+struct virtchnl2_queue_vector_maps {
+ __le32 vport_id;
+ __le16 num_qv_maps;
+ u8 pad[10];
+ struct virtchnl2_queue_vector qv_maps[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
+
+/**
+ * struct virtchnl2_loopback - Loopback info.
+ * @vport_id: Vport id.
+ * @enable: Enable/disable.
+ * @pad: Padding for future extensions.
+ *
+ * PF/VF sends this message to transition to/from the loopback state. Setting
+ * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
+ * disables it. CP configures the state to loopback and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_LOOPBACK.
+ */ +struct virtchnl2_loopback { + __le32 vport_id; + u8 enable; + u8 pad[3]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback); + +/* struct virtchnl2_mac_addr - MAC address info. + * @addr: MAC address. + * @type: MAC type. See enum virtchnl2_mac_addr_type. + * @pad: Padding for future extensions. + */ +struct virtchnl2_mac_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr); + +/** + * struct virtchnl2_mac_addr_list - List of MAC addresses. + * @vport_id: Vport id. + * @num_mac_addr: Number of MAC addresses. + * @pad: Padding. + * @mac_addr_list: List with MAC address info. + * + * PF/VF driver uses this structure to send list of MAC addresses to be + * added/deleted to the CP where as CP performs the action and returns the + * status. + * + * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR. + */ +struct virtchnl2_mac_addr_list { + __le32 vport_id; + __le16 num_mac_addr; + u8 pad[2]; + struct virtchnl2_mac_addr mac_addr_list[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list); + +/** + * struct virtchnl2_promisc_info - Promisc type info. + * @vport_id: Vport id. + * @flags: See enum virtchnl2_promisc_flags. + * @pad: Padding for future extensions. + * + * PF/VF sends vport id and flags to the CP where as CP performs the action + * and returns the status. + * + * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE. + */ +struct virtchnl2_promisc_info { + __le32 vport_id; + /* See VIRTCHNL2_PROMISC_FLAGS definitions */ + __le16 flags; + u8 pad[2]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info); + +#endif /* _VIRTCHNL_2_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h new file mode 100644 index 0000000000..f1b577f1c4 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h @@ -0,0 +1,451 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _VIRTCHNL2_LAN_DESC_H_ +#define _VIRTCHNL2_LAN_DESC_H_ + +#include + +/* This is an interface definition file where existing enums and their values + * must remain unchanged over time, so we specify explicit values for all enums. + */ + +/* Transmit descriptor ID flags + */ +enum virtchnl2_tx_desc_ids { + VIRTCHNL2_TXDID_DATA = BIT(0), + VIRTCHNL2_TXDID_CTX = BIT(1), + /* TXDID bit 2 is reserved + * TXDID bit 3 is free for future use + * TXDID bit 4 is reserved + */ + VIRTCHNL2_TXDID_FLEX_TSO_CTX = BIT(5), + /* TXDID bit 6 is reserved */ + VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 = BIT(7), + /* TXDID bits 8 and 9 are free for future use + * TXDID bit 10 is reserved + * TXDID bit 11 is free for future use + */ + VIRTCHNL2_TXDID_FLEX_FLOW_SCHED = BIT(12), + /* TXDID bits 13 and 14 are free for future use */ + VIRTCHNL2_TXDID_DESC_DONE = BIT(15), +}; + +/* Receive descriptor IDs */ +enum virtchnl2_rx_desc_ids { + VIRTCHNL2_RXDID_1_32B_BASE = 1, + /* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be + * differentiated based on queue model; e.g. single queue model can + * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ + * for DID 2. 
+ */ + VIRTCHNL2_RXDID_2_FLEX_SPLITQ = 2, + VIRTCHNL2_RXDID_2_FLEX_SQ_NIC = VIRTCHNL2_RXDID_2_FLEX_SPLITQ, + /* 3 through 6 are reserved */ + VIRTCHNL2_RXDID_7_HW_RSVD = 7, + /* 8 through 15 are free */ +}; + +/* Receive descriptor ID bitmasks */ +#define VIRTCHNL2_RXDID_M(bit) BIT_ULL(VIRTCHNL2_RXDID_##bit) + +enum virtchnl2_rx_desc_id_bitmasks { + VIRTCHNL2_RXDID_1_32B_BASE_M = VIRTCHNL2_RXDID_M(1_32B_BASE), + VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M = VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ), + VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC), + VIRTCHNL2_RXDID_7_HW_RSVD_M = VIRTCHNL2_RXDID_M(7_HW_RSVD), +}; + +/* For splitq virtchnl2_rx_flex_desc_adv_nic_3 desc members */ +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M GENMASK(3, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M GENMASK(7, 6) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M GENMASK(9, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S 12 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M GENMASK(15, 13) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M GENMASK(13, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M GENMASK(9, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M GENMASK(14, 12) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S) + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchl2_rx_flex_desc_adv_status_error_0_qw1_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_M = BIT(7), +}; + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchnl2_rx_flex_desc_adv_status_error_0_qw0_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_M = BIT(7), +}; + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchnl2_rx_flex_desc_adv_status_error_1_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_M = GENMASK(1, 0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_M = 
BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_M = BIT(7), +}; + +/* For singleq (flex) virtchnl2_rx_flex_desc fields + * For virtchnl2_rx_flex_desc.ptype_flex_flags0 member + */ +#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M GENMASK(9, 0) + +/* For virtchnl2_rx_flex_desc.pkt_len member */ +#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M GENMASK(13, 0) + +/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */ +enum virtchnl2_rx_flex_desc_status_error_0_bits { + VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M = BIT(7), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_M = BIT(8), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M = BIT(9), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_M = BIT(10), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_M = BIT(11), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M = BIT(12), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_M = BIT(13), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_M = BIT(14), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_M = BIT(15), +}; + +/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */ +enum virtchnl2_rx_flex_desc_status_error_1_bits { + VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_M = GENMASK(3, 0), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_M = BIT(5), + /* [10:6] reserved */ + VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_M = BIT(11), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_M = BIT(12), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_M = BIT(13), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_M = BIT(14), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_M = BIT(15), +}; + +/* For virtchnl2_rx_flex_desc.ts_low member */ +#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0) + +/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */ +#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M GENMASK_ULL(51, 38) +#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M GENMASK_ULL(37, 30) +#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M GENMASK_ULL(26, 19) +#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M GENMASK_ULL(18, 0) + +/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */ +enum virtchnl2_rx_base_desc_status_bits { + VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M = BIT(0), + VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M = BIT(1), + VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_M = BIT(2), + VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M = BIT(3), + VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_M = BIT(4), + VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_M = GENMASK(7, 5), + VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_M = BIT(8), + VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_M = GENMASK(10, 9), + VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_M = BIT(11), + VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_M = GENMASK(13, 12), + VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_M = BIT(14), + VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M = BIT(15), + VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_M = GENMASK(17, 16), + VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_M = BIT(18), +}; + +/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */ +enum virtchnl2_rx_base_desc_error_bits { + VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M = BIT(0), + VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_M = BIT(1), + VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_M = BIT(2), + VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_M = 
GENMASK(5, 3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M = BIT(3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M = BIT(4),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M = BIT(5),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_M = BIT(6),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M = BIT(7),
+};
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M GENMASK(13, 12)
+
+/**
+ * struct virtchnl2_splitq_rx_buf_desc - SplitQ RX buffer descriptor format
+ * @qword0: RX buffer struct.
+ * @qword0.buf_id: Buffer identifier.
+ * @qword0.rsvd0: Reserved.
+ * @qword0.rsvd1: Reserved.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd2: Reserved.
+ *
+ * Receive Descriptors
+ * SplitQ buffer
+ * | 16| 0|
+ * ----------------------------------------------------------------
+ * | RSV | Buffer ID |
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_splitq_rx_buf_desc {
+ struct {
+ __le16 buf_id;
+ __le16 rsvd0;
+ __le32 rsvd1;
+ } qword0;
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd2;
+};
+
+/**
+ * struct virtchnl2_singleq_rx_buf_desc - SingleQ RX buffer descriptor format.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd1: Reserved.
+ * @rsvd2: Reserved.
+ *
+ * SingleQ buffer
+ * | 0|
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_singleq_rx_buf_desc {
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/**
+ * struct virtchnl2_singleq_base_rx_desc - RX descriptor writeback format.
+ * @qword0: First quad word struct.
+ * @qword0.lo_dword: Lower dual word struct.
+ * @qword0.lo_dword.mirroring_status: Mirrored packet status.
+ * @qword0.lo_dword.l2tag1: Stripped L2 tag from the received packet.
+ * @qword0.hi_dword: High dual word union.
+ * @qword0.hi_dword.rss: RSS hash.
+ * @qword0.hi_dword.fd_id: Flow director filter id.
+ * @qword1: Second quad word struct.
+ * @qword1.status_error_ptype_len: Status/error/PTYPE/length.
+ * @qword2: Third quad word struct.
+ * @qword2.ext_status: Extended status.
+ * @qword2.rsvd: Reserved.
+ * @qword2.l2tag2_1: Extracted L2 tag 2 from the packet.
+ * @qword2.l2tag2_2: Reserved.
+ * @qword3: Fourth quad word struct.
+ * @qword3.reserved: Reserved.
+ * @qword3.fd_id: Flow director filter id.
+ *
+ * Profile ID 0x1, SingleQ, base writeback format
+ */
+struct virtchnl2_singleq_base_rx_desc {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss;
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ __le64 status_error_ptype_len;
+ } qword1;
+ struct {
+ __le16 ext_status;
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ __le32 reserved;
+ __le32 fd_id;
+ } qword3;
+};
+
+/**
+ * struct virtchnl2_rx_flex_desc_nic - RX descriptor writeback format.
+ *
+ * @rxdid: Descriptor builder profile id.
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved.
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0].
+ * @status_error0: Status/Error section 0.
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @rss_hash: RSS hash.
+ * @status_error1: Status/Error section 1.
+ * @flexi_flags2: Flexible flags section 2.
+ * @ts_low: Lower word of timestamp value.
+ * @l2tag2_1st: First L2TAG2.
+ * @l2tag2_2nd: Second L2TAG2.
+ * @flow_id: Flow id.
+ * @flex_ts: Timestamp and flexible flow id union.
+ * @flex_ts.ts_high: Timestamp higher word of the timestamp value.
+ * @flex_ts.flex.rsvd: Reserved.
+ * @flex_ts.flex.flow_id_ipv6: IPv6 flow id.
+ *
+ * Profile ID 0x2, SingleQ, flex writeback format
+ */
+struct virtchnl2_rx_flex_desc_nic {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/**
+ * struct virtchnl2_rx_flex_desc_adv_nic_3 - RX descriptor writeback format.
+ * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0].
+ * @status_err0_qw0: Status/Error section 0 in quad word 0.
+ * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10],
+ * ptype=[9:0].
+ * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq,
+ * plen=[13:0].
+ * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13],
+ * ext_udp0=[12], sph=[11] only in splitq, rsc=[10]
+ * only in splitq, header=[9:0].
+ * @status_err0_qw1: Status/Error section 0 in quad word 1.
+ * @status_err1: Status/Error section 1.
+ * @fflags1: Flexible flags section 1.
+ * @ts_low: Lower word of timestamp value.
+ * @buf_id: Buffer identifier. Only in splitq mode.
+ * @misc: Union.
+ * @misc.raw_cs: Raw checksum.
+ * @misc.l2tag1: Stripped L2 tag from the received packet
+ * @misc.rscseglen: RSC segment length.
+ * @hash1: Lower bits of Rx hash value.
+ * @ff2_mirrid_hash2: Union.
+ * @ff2_mirrid_hash2.fflags2: Flexible flags section 2.
+ * @ff2_mirrid_hash2.mirrorid: Mirror id.
+ * @ff2_mirrid_hash2.hash2: Hash[23:16].
+ * @hash3: Upper bits of Rx hash value.
+ * @l2tag2: Extracted L2 tag 2 from the packet.
+ * @fmd4: Flexible metadata container 4.
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @fmd6: Flexible metadata container 6.
+ * @ts_high: Timestamp higher word of the timestamp value.
+ * + * Profile ID 0x2, SplitQ, flex writeback format + * + * Flex-field 0: BufferID + * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW) + * Flex-field 2: Hash[15:0] + * Flex-flags 2: Hash[23:16] + * Flex-field 3: L2TAG2 + * Flex-field 5: L2TAG1 + * Flex-field 7: Timestamp (upper 32 bits) + */ +struct virtchnl2_rx_flex_desc_adv_nic_3 { + /* Qword 0 */ + u8 rxdid_ucast; + u8 status_err0_qw0; + __le16 ptype_err_fflags0; + __le16 pktlen_gen_bufq_id; + __le16 hdrlen_flags; + /* Qword 1 */ + u8 status_err0_qw1; + u8 status_err1; + u8 fflags1; + u8 ts_low; + __le16 buf_id; + union { + __le16 raw_cs; + __le16 l2tag1; + __le16 rscseglen; + } misc; + /* Qword 2 */ + __le16 hash1; + union { + u8 fflags2; + u8 mirrorid; + u8 hash2; + } ff2_mirrid_hash2; + u8 hash3; + __le16 l2tag2; + __le16 fmd4; + /* Qword 3 */ + __le16 l2tag1; + __le16 fmd6; + __le32 ts_high; +}; + +/* Common union for accessing descriptor format structs */ +union virtchnl2_rx_desc { + struct virtchnl2_singleq_base_rx_desc base_wb; + struct virtchnl2_rx_flex_desc_nic flex_nic_wb; + struct virtchnl2_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb; +}; + +#endif /* _VIRTCHNL_LAN_DESC_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index b9b9d35494..53b396fd19 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -5,9 +5,9 @@ * e1000_i211 */ -#include +#include #include - +#include #include "e1000_hw.h" #include "e1000_i210.h" diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index fa136e6e93..0da57e8959 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2007 - 2018 Intel Corporation. */ -#include +#include #include - +#include #include "e1000_mac.h" #include "e1000_nvm.h" diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index a018000f7d..3c1b562a32 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ -#include +#include #include - +#include #include "e1000_mac.h" #include "e1000_phy.h" diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 4ee849985e..16d2a55d5e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2356,10 +2356,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igb_gstrings_stats[i].stat_string); for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 76b34cee1d..b2295caa2f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3069,6 +3069,7 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; + char *lbuf; igb_get_fw_version(hw, &fw); @@ -3076,36 +3077,34 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: - /* if option is rom valid, display its version too */ + /* if option rom is valid, display its version too */ if (fw.or_valid) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, fw.etrack_id, - fw.or_major, fw.or_build, fw.or_patch); + lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, + fw.etrack_id, fw.or_major, fw.or_build, + fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, fw.etrack_id); + lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, + fw.etrack_id); } else { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d.%d", - fw.eep_major, fw.eep_minor, fw.eep_build); + lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, + fw.eep_minor, fw.eep_build); } break; } + + /* the truncate happens here if it doesn't fit */ + strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); + kfree(lbuf); } /** @@ -3264,7 +3263,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0); @@ -7857,8 +7856,8 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, { struct pci_dev *pdev = adapter->pdev; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - struct list_head *pos; - struct vf_mac_filter *entry = NULL; + struct vf_mac_filter *entry; + bool found = false; int ret = 0; if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && @@ -7878,8 +7877,7 @@ static int 
igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, switch (info) { case E1000_VF_MAC_FILTER_CLR: /* remove all unicast MAC filters related to the current VF */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); + list_for_each_entry(entry, &adapter->vf_macs.l, l) { if (entry->vf == vf) { entry->vf = -1; entry->free = true; @@ -7889,13 +7887,14 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, break; case E1000_VF_MAC_FILTER_ADD: /* try to find empty slot in the list */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); - if (entry->free) + list_for_each_entry(entry, &adapter->vf_macs.l, l) { + if (entry->free) { + found = true; break; + } } - if (entry && entry->free) { + if (found) { entry->free = false; entry->vf = vf; ether_addr_copy(entry->vf_mac, addr); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 7ff2752dd7..e6c1fbee04 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -3,25 +3,25 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include +#include +#include #include #include - +#include +#include +#include +#include +#include +#include #include "igbvf.h" char igbvf_driver_name[] = "igbvf"; @@ -2785,7 +2785,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igbvf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); adapter->bd_number = cards_found++; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index f7284fa432..859b2636f3 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -773,9 +773,10 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, break; case ETH_SS_STATS: for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + ethtool_sprintf(&p, "%s", + igc_gstrings_stats[i].stat_string); for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igc_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 17546a035a..d2562c8e80 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018 Intel Corporation */ +#include #include #include "igc_hw.h" diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 98de34d0ce..e9bb403bba 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6935,7 +6935,7 @@ static int igc_probe(struct pci_dev *pdev, */ igc_get_hw_control(adapter); - strncpy(netdev->name, "eth%d", IFNAMSIZ); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) goto err_register; diff --git 
a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 53b77c969c..d0d9e71701 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018 Intel Corporation */ +#include #include "igc_phy.h" /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 100388968e..3d56481e16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -123,14 +123,14 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) if (ret_val) return ret_val; if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; break; default: break; @@ -213,7 +213,7 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + return -EIO; } return 0; @@ -283,7 +283,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) /* Validate the water mark configuration */ if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; /* Low water mark of zero causes XOFF floods */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { @@ -292,7 +292,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } } } @@ -369,7 +369,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } /* Set 802.3x based flow control settings. 
*/ @@ -438,7 +438,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + status = -EIO; hw_dbg(hw, "Autonegotiation did not complete.\n"); } } @@ -478,7 +478,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { hw_dbg(hw, "Link was indicated but link is down\n"); - return IXGBE_ERR_LINK_SETUP; + return -EIO; } return 0; @@ -594,7 +594,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + return -EINVAL; /* Set KX4/KX support according to speed requested */ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || @@ -701,9 +701,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) /* Init PHY and function pointers, perform SFP setup */ phy_status = hw->phy.ops.init(hw); - if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (phy_status == -EOPNOTSUPP) return phy_status; - if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + if (phy_status == -ENOENT) goto mac_reset_top; hw->phy.ops.reset(hw); @@ -727,7 +727,7 @@ mac_reset_top: udelay(1); } if (ctrl & IXGBE_CTRL_RST) { - status = IXGBE_ERR_RESET_FAILED; + status = -EIO; hw_dbg(hw, "Reset polling failed to complete.\n"); } @@ -789,7 +789,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); @@ -814,7 +814,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); @@ -845,7 +845,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, u32 vftabyte; if (vlan > 4095) - return IXGBE_ERR_PARAM; + return -EINVAL; /* Determine 32-bit word position in array */ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ @@ -964,7 +964,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, gssr = IXGBE_GSSR_PHY0_SM; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; if (hw->phy.type == ixgbe_phy_nl) { /* @@ -993,7 +993,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { hw_dbg(hw, "EEPROM read did not pass.\n"); - status = IXGBE_ERR_SFP_NOT_PRESENT; + status = -ENOENT; goto out; } @@ -1003,7 +1003,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, *eeprom_data = (u8)(sfp_data >> 8); } else { - status = IXGBE_ERR_PHY; + status = -EIO; } out: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 58ea959a44..339e106a57 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -117,7 +117,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; @@ -144,7 +144,7 @@ static s32 
ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) if (ret_val) { hw_dbg(hw, " sfp module setup not complete\n"); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + return -EIO; } } @@ -159,7 +159,7 @@ setup_sfp_err: usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); hw_err(hw, "eeprom read at offset %d failed\n", data_offset); - return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + return -EIO; } /** @@ -184,7 +184,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; *locked = true; } @@ -219,7 +219,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; locked = true; } @@ -400,7 +400,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, break; default: - return IXGBE_ERR_LINK_SETUP; + return -EIO; } if (hw->phy.multispeed_fiber) { @@ -541,7 +541,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + status = -EIO; hw_dbg(hw, "Autoneg did not complete.\n"); } } @@ -794,7 +794,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) - return IXGBE_ERR_LINK_SETUP; + return -EINVAL; /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) @@ -861,8 +861,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { - status = - IXGBE_ERR_AUTONEG_NOT_COMPLETE; + status = -EIO; hw_dbg(hw, "Autoneg did not complete.\n"); } } @@ -927,7 +926,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) /* Identify PHY and related function pointers */ status = hw->phy.ops.init(hw); - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (status == -EOPNOTSUPP) return status; /* Setup SFP module if there is one present. 
*/ @@ -936,7 +935,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->phy.sfp_setup_needed = false; } - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (status == -EOPNOTSUPP) return status; /* Reset PHY */ @@ -974,7 +973,7 @@ mac_reset_top: } if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; + status = -EIO; hw_dbg(hw, "Reset polling failed to complete.\n"); } @@ -1093,7 +1092,7 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) udelay(10); } - return IXGBE_ERR_FDIR_CMD_INCOMPLETE; + return -EIO; } /** @@ -1155,7 +1154,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; + return -EIO; } /* Clear FDIR statistics registers (read to clear) */ @@ -1387,7 +1386,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, break; default: hw_dbg(hw, " Error on flow type input\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } /* configure FDIRCMD register */ @@ -1546,7 +1545,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; default: hw_dbg(hw, " Error on vm pool mask\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { @@ -1555,14 +1554,14 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, if (input_mask->formatted.dst_port || input_mask->formatted.src_port) { hw_dbg(hw, " Error on src/dst port mask\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } break; case IXGBE_ATR_L4TYPE_MASK: break; default: hw_dbg(hw, " Error on flow type mask\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { @@ -1583,7 +1582,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; default: hw_dbg(hw, " Error on VLAN mask\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { @@ -1595,7 +1594,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; default: hw_dbg(hw, " Error on flexible byte mask\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ @@ -1824,7 +1823,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) /* Return error if SFP module has been detected but is not supported */ if (hw->phy.type == ixgbe_phy_sfp_unsupported) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; return status; } @@ -1863,13 +1862,13 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) * Verifies that installed the firmware version is 0.6 or higher * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. * - * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or - * if the FW version is not supported. + * Return: -EACCES if the FW is not present or if the FW version is + * not supported. 
**/ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { - s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; + s32 status = -EACCES; u16 offset; u16 fw_version = 0; @@ -1883,7 +1882,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) goto fw_version_err; if (fw_offset == 0 || fw_offset == 0xFFFF) - return IXGBE_ERR_EEPROM_VERSION; + return -EACCES; /* get the offset to the Pass Through Patch Configuration block */ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; @@ -1891,7 +1890,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) goto fw_version_err; if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF) - return IXGBE_ERR_EEPROM_VERSION; + return -EACCES; /* get the firmware version */ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; @@ -1905,7 +1904,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) fw_version_err: hw_err(hw, "eeprom read at offset %d failed\n", offset); - return IXGBE_ERR_EEPROM_VERSION; + return -EACCES; } /** @@ -2038,7 +2037,7 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { hw_dbg(hw, "auto negotiation not completed\n"); - ret_val = IXGBE_ERR_RESET_FAILED; + ret_val = -EIO; goto reset_pipeline_out; } @@ -2087,7 +2086,7 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (!timeout) { hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; + status = -EIO; goto release_i2c_access; } } @@ -2141,7 +2140,7 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, if (!timeout) { hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); - status = IXGBE_ERR_I2C; + status = -EIO; goto release_i2c_access; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 878dd8dff5..b2a0f2aaa0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -124,7 +124,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } /* @@ -215,7 +215,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } if (hw->mac.type != ixgbe_mac_X540) { @@ -500,7 +500,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, if (pba_num == NULL) { hw_dbg(hw, "PBA string buffer was null\n"); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); @@ -526,7 +526,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { hw_dbg(hw, "PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; + return -ENOSPC; } /* extract hex string from data and pba_ptr */ @@ -563,13 +563,13 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, if (length == 0xFFFF || length == 0) { hw_dbg(hw, "NVM PBA number section invalid length\n"); - return IXGBE_ERR_PBA_SECTION; + return -EIO; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { hw_dbg(hw, "PBA string buffer too small\n"); - return IXGBE_ERR_NO_SPACE; + return 
-ENOSPC; } /* trim pba length from start of string */ @@ -805,7 +805,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; /* To turn on the LED, set mode to ON. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -826,7 +826,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; /* To turn off the LED, set mode to OFF. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); @@ -904,11 +904,8 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; - - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (words == 0 || (offset + words > hw->eeprom.word_size)) + return -EINVAL; /* * The EEPROM page size cannot be queried from the chip. We do lazy @@ -962,7 +959,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, if (ixgbe_ready_eeprom(hw) != 0) { ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; + return -EIO; } for (i = 0; i < words; i++) { @@ -1028,7 +1025,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + return -EINVAL; return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); } @@ -1050,11 +1047,8 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; - - if (offset + words > hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (words == 0 || (offset + words > hw->eeprom.word_size)) + return -EINVAL; /* * We cannot hold synchronization semaphores for too long @@ -1099,7 +1093,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, if (ixgbe_ready_eeprom(hw) != 0) { ixgbe_release_eeprom(hw); - return IXGBE_ERR_EEPROM; + return -EIO; } for (i = 0; i < words; i++) { @@ -1142,7 +1136,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + return -EINVAL; return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); } @@ -1165,11 +1159,8 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; - - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (words == 0 || offset >= hw->eeprom.word_size) + return -EINVAL; for (i = 0; i < words; i++) { eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | @@ -1262,11 +1253,8 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, hw->eeprom.ops.init_params(hw); - if (words == 0) - return IXGBE_ERR_INVALID_ARGUMENT; - - if (offset >= hw->eeprom.word_size) - return IXGBE_ERR_EEPROM; + if (words == 0 || offset >= hw->eeprom.word_size) + return -EINVAL; for (i = 0; i < words; i++) { eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | @@ -1328,7 +1316,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) } udelay(5); } - return IXGBE_ERR_EEPROM; + return -EIO; } /** @@ -1344,7 +1332,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) u32 i; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) - return IXGBE_ERR_SWFW_SYNC; + 
return -EBUSY; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); @@ -1366,7 +1354,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) hw_dbg(hw, "Could not acquire EEPROM grant\n"); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - return IXGBE_ERR_EEPROM; + return -EIO; } /* Setup EEPROM for Read/Write */ @@ -1419,7 +1407,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SMBI) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } } @@ -1447,7 +1435,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) if (i >= timeout) { hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); ixgbe_release_eeprom_semaphore(hw); - return IXGBE_ERR_EEPROM; + return -EIO; } return 0; @@ -1503,7 +1491,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) */ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { hw_dbg(hw, "SPI EEPROM Status error\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } return 0; @@ -1715,7 +1703,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { if (hw->eeprom.ops.read(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } /* If the pointer seems invalid */ @@ -1724,7 +1712,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) if (hw->eeprom.ops.read(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } if (length == 0xFFFF || length == 0) @@ -1733,7 +1721,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) for (j = pointer + 1; j <= pointer + length; j++) { if (hw->eeprom.ops.read(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } checksum += word; } @@ -1786,7 +1774,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * calculated checksum */ if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; + status = -EIO; /* If the user cares, return the calculated checksum */ if (checksum_val) @@ -1845,7 +1833,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } /* setup VMDq pool selection before this RAR gets enabled */ @@ -1897,7 +1885,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } /* @@ -2146,7 +2134,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* Validate the water mark configuration. */ if (!hw->fc.pause_time) - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; /* Low water mark of zero causes XOFF floods */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { @@ -2155,7 +2143,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { hw_dbg(hw, "Invalid water mark configuration\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } } } @@ -2212,7 +2200,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) break; default: hw_dbg(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } /* Set 802.3x based flow control settings. 
*/ @@ -2269,7 +2257,7 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + return -EINVAL; if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* @@ -2321,7 +2309,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + return -EIO; pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); @@ -2353,12 +2341,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + return -EIO; if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) - return IXGBE_ERR_FC_NOT_NEGOTIATED; + return -EIO; } /* * Read the 10g AN autoc and LP ability registers and resolve @@ -2407,8 +2395,8 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) **/ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; + s32 ret_val = -EIO; bool link_up; /* @@ -2510,7 +2498,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Disables PCI-Express primary access and verifies there are no pending - * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable + * requests. -EALREADY is returned if primary disable * bit hasn't caused the primary requests to be disabled, else 0 * is returned signifying primary requests disabled. 
**/ @@ -2575,7 +2563,7 @@ gio_disable_fail: } hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); - return IXGBE_ERR_PRIMARY_REQUESTS_PENDING; + return -EALREADY; } /** @@ -2600,7 +2588,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_eeprom_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); if (!(gssr & (fwmask | swmask))) { @@ -2620,7 +2608,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); usleep_range(5000, 10000); - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } /** @@ -2757,7 +2745,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) s32 ret_val; if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; /* * Link must be up to auto-blink the LEDs; @@ -2803,7 +2791,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) s32 ret_val; if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val) @@ -2963,7 +2951,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); @@ -3014,7 +3002,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } if (vmdq < 32) { @@ -3091,7 +3079,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) * will simply bypass the VLVF if there are no entries present in the * VLVF that contain our VLAN */ - first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + first_empty_slot = vlvf_bypass ? -ENOSPC : 0; /* add VLAN enable bit for comparison */ vlan |= IXGBE_VLVF_VIEN; @@ -3115,7 +3103,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) if (!first_empty_slot) hw_dbg(hw, "No space in VLVF.\n"); - return first_empty_slot ? : IXGBE_ERR_NO_SPACE; + return first_empty_slot ? : -ENOSPC; } /** @@ -3135,7 +3123,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, s32 vlvf_index; if ((vlan > 4095) || (vind > 63)) - return IXGBE_ERR_PARAM; + return -EINVAL; /* * this is a 2 part operation - first the VFTA, then the @@ -3611,7 +3599,8 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * * Communicates with the manageability block. On success return 0 * else returns semaphore error when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * semaphore, -EINVAL when incorrect parameters passed or -EIO when + * command fails. * * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held * by the caller. 
@@ -3624,7 +3613,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EINVAL; } /* Set bit 9 of FWSTS clearing FW reset indication */ @@ -3635,13 +3624,13 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EIO; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; } dword_len = length >> 2; @@ -3666,7 +3655,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, /* Check command successful completion. */ if ((timeout && i == timeout) || !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EIO; return 0; } @@ -3686,7 +3675,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, * in these cases. * * Communicates with the manageability block. On success return 0 - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * else return -EIO or -EINVAL. **/ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, @@ -3701,7 +3690,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EINVAL; } /* Take management host interface semaphore */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); @@ -3731,7 +3720,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); - status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = -EIO; goto rel_out; } @@ -3762,8 +3751,8 @@ rel_out: * * Sends driver version number to firmware through the manageability * block. On success return 0 - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * else returns -EBUSY when encountering an error acquiring + * semaphore or -EIO when command fails. 
**/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, __always_unused u16 len, @@ -3799,7 +3788,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, FW_CEM_RESP_STATUS_SUCCESS) ret_val = 0; else - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + ret_val = -EIO; break; } @@ -3897,14 +3886,14 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, return status; if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); if (status) return status; if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; return 0; } @@ -3927,7 +3916,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) /* Only support thermal sensors attached to physical port 0 */ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); if (status) @@ -3987,7 +3976,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) /* Only support thermal sensors attached to physical port 0 */ if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); if (status) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0bbad4a5cc..e47461f3ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1413,11 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < IXGBE_TEST_LEN; i++) - ethtool_sprintf(&p, ixgbe_gstrings_test[i]); + ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); @@ -3370,7 +3370,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + s32 status = -EFAULT; u8 databyte = 0xFF; int i = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dd03b017df..6a3f633406 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (xdp_xmit & IXGBE_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); @@ -2756,7 +2756,6 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; - s32 rc; if (test_bit(__IXGBE_DOWN, &adapter->state)) return; @@ -2790,14 +2789,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) } /* Check if this is not due to overtemp */ - if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) + if (!hw->phy.ops.check_overtemp(hw)) return; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: - rc = 
hw->phy.ops.check_overtemp(hw); - if (rc != IXGBE_ERR_OVERTEMP) + if (!hw->phy.ops.check_overtemp(hw)) return; break; default: @@ -5512,7 +5510,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) { u32 speed; bool autoneg, link_up = false; - int ret = IXGBE_ERR_LINK_SETUP; + int ret = -EIO; if (hw->mac.ops.check_link) ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); @@ -5983,13 +5981,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) err = hw->mac.ops.init_hw(hw); switch (err) { case 0: - case IXGBE_ERR_SFP_NOT_PRESENT: - case IXGBE_ERR_SFP_NOT_SUPPORTED: + case -ENOENT: + case -EOPNOTSUPP: break; - case IXGBE_ERR_PRIMARY_REQUESTS_PENDING: + case -EALREADY: e_dev_err("primary disable timed out\n"); break; - case IXGBE_ERR_EEPROM_VERSION: + case -EACCES: /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " "Please be aware there may be issues associated with " @@ -7829,10 +7827,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; err = hw->phy.ops.identify_sfp(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (err == -EOPNOTSUPP) goto sfp_out; - if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + if (err == -ENOENT) { /* If no cable is present, then we need to reset * the next time we find a good cable. */ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; @@ -7858,7 +7856,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) else err = hw->mac.ops.setup_sfp(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (err == -EOPNOTSUPP) goto sfp_out; adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; @@ -7867,8 +7865,8 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) sfp_out: clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && - (adapter->netdev->reg_state == NETREG_REGISTERED)) { + if (err == -EOPNOTSUPP && + adapter->netdev->reg_state == NETREG_REGISTERED) { e_dev_err("failed to initialize because an unsupported " "SFP+ module type was detected.\n"); e_dev_err("Reload the driver after installing a " @@ -7938,7 +7936,7 @@ static void ixgbe_service_timer(struct timer_list *t) static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 status; + bool overtemp; if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) return; @@ -7948,11 +7946,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) if (!hw->phy.ops.handle_lasi) return; - status = hw->phy.ops.handle_lasi(&adapter->hw); - if (status != IXGBE_ERR_OVERTEMP) - return; - - e_crit(drv, "%s\n", ixgbe_overheat_msg); + hw->phy.ops.handle_lasi(&adapter->hw, &overtemp); + if (overtemp) + e_crit(drv, "%s\n", ixgbe_overheat_msg); } static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) @@ -10922,9 +10918,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = hw->mac.ops.reset_hw(hw); hw->phy.reset_if_overtemp = false; ixgbe_set_eee_capable(adapter); - if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + if (err == -ENOENT) { err = 0; - } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + } else if (err == -EOPNOTSUPP) { e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); e_dev_err("Reload the driver after installing a supported module.\n"); goto err_sw_init; @@ -11143,7 +11139,7 @@ skip_sriov: /* reset the hardware with the new settings */ err = 
hw->mac.ops.start_hw(hw); - if (err == IXGBE_ERR_EEPROM_VERSION) { + if (err == -EACCES) { /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " "Please be aware there may be issues associated " diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 5679293e53..fe7ef57733 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -24,7 +24,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) size = mbx->size; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; return mbx->ops->read(hw, msg, size, mbx_id); } @@ -43,10 +43,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; if (size > mbx->size) - return IXGBE_ERR_MBX; + return -EINVAL; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; return mbx->ops->write(hw, msg, size, mbx_id); } @@ -63,7 +63,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; return mbx->ops->check_for_msg(hw, mbx_id); } @@ -80,7 +80,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; return mbx->ops->check_for_ack(hw, mbx_id); } @@ -97,7 +97,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; return mbx->ops->check_for_rst(hw, mbx_id); } @@ -115,12 +115,12 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) int countdown = mbx->timeout; if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; while (mbx->ops->check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; + return -EIO; udelay(mbx->usec_delay); } @@ -140,12 +140,12 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) int countdown = mbx->timeout; if (!countdown || !mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; while (mbx->ops->check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) - return IXGBE_ERR_MBX; + return -EIO; udelay(mbx->usec_delay); } @@ -169,7 +169,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, s32 ret_val; if (!mbx->ops) - return IXGBE_ERR_MBX; + return -EIO; ret_val = ixgbe_poll_for_msg(hw, mbx_id); if (ret_val) @@ -197,7 +197,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops || !mbx->timeout) - return IXGBE_ERR_MBX; + return -EIO; /* send msg */ ret_val = mbx->ops->write(hw, msg, size, mbx_id); @@ -217,7 +217,7 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) return 0; } - return IXGBE_ERR_MBX; + return -EIO; } /** @@ -238,7 +238,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) return 0; } - return IXGBE_ERR_MBX; + return -EIO; } /** @@ -259,7 +259,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) return 0; } - return IXGBE_ERR_MBX; + return -EIO; } /** @@ -295,7 +295,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) return 0; } - return IXGBE_ERR_MBX; + return -EIO; } /** @@ -317,7 +317,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) return 
0; - return IXGBE_ERR_MBX; + return -EIO; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 8f4316b192..6434c190e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -7,7 +7,6 @@ #include "ixgbe_type.h" #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 #define IXGBE_VFMAILBOX 0x002FC #define IXGBE_VFMBMEM 0x00200 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 689470c1e8..930dc50719 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -102,7 +102,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) @@ -150,7 +150,7 @@ fail: hw_dbg(hw, "I2C byte read combined error.\n"); } while (retry < max_retry); - return IXGBE_ERR_I2C; + return -EIO; } /** @@ -179,7 +179,7 @@ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) @@ -215,7 +215,7 @@ fail: hw_dbg(hw, "I2C byte write combined error.\n"); } while (retry < max_retry); - return IXGBE_ERR_I2C; + return -EIO; } /** @@ -262,8 +262,8 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) **/ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { + u32 status = -EFAULT; u32 phy_addr; - u32 status = IXGBE_ERR_PHY_ADDR_INVALID; if (!hw->phy.phy_semaphore_mask) { if (hw->bus.lan_id) @@ -282,7 +282,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) if (ixgbe_probe_phy(hw, phy_addr)) return 0; else - return IXGBE_ERR_PHY_ADDR_INVALID; + return -EFAULT; } for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { @@ -408,8 +408,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) return status; /* Don't reset PHY if it's shut down due to overtemp. */ - if (!hw->phy.reset_if_overtemp && - (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + if (!hw->phy.reset_if_overtemp && hw->phy.ops.check_overtemp(hw)) return 0; /* Blocked by MNG FW so bail */ @@ -457,7 +456,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) if (ctrl & MDIO_CTRL1_RESET) { hw_dbg(hw, "PHY reset polling failed to complete.\n"); - return IXGBE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -500,7 +499,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY address command did not complete.\n"); - return IXGBE_ERR_PHY; + return -EIO; } /* Address cycle complete, setup and write the read @@ -527,7 +526,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY read command didn't complete\n"); - return IXGBE_ERR_PHY; + return -EIO; } /* Read operation is complete. 
Get the data @@ -559,7 +558,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } return status; @@ -604,7 +603,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY address cmd didn't complete\n"); - return IXGBE_ERR_PHY; + return -EIO; } /* @@ -632,7 +631,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { hw_dbg(hw, "PHY write cmd didn't complete\n"); - return IXGBE_ERR_PHY; + return -EIO; } return 0; @@ -657,7 +656,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } return status; @@ -1430,7 +1429,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) if ((phy_data & MDIO_CTRL1_RESET) != 0) { hw_dbg(hw, "PHY reset did not complete.\n"); - return IXGBE_ERR_PHY; + return -EIO; } /* Get init offsets */ @@ -1487,12 +1486,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) hw_dbg(hw, "SOL\n"); } else { hw_dbg(hw, "Bad control value\n"); - return IXGBE_ERR_PHY; + return -EIO; } break; default: hw_dbg(hw, "Bad control type\n"); - return IXGBE_ERR_PHY; + return -EIO; } } @@ -1500,7 +1499,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) err_eeprom: hw_err(hw, "eeprom read at offset %d failed\n", data_offset); - return IXGBE_ERR_PHY; + return -EIO; } /** @@ -1518,10 +1517,10 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) return ixgbe_identify_qsfp_module_generic(hw); default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } /** @@ -1546,7 +1545,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } /* LAN ID is needed for sfp_type determination */ @@ -1561,7 +1560,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, @@ -1752,7 +1751,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } /* Anything else 82598-based is supported */ @@ -1776,7 +1775,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) } hw_dbg(hw, "SFP+ module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } return 0; @@ -1786,7 +1785,7 @@ err_read_i2c_eeprom: hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; } - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } /** @@ -1813,7 +1812,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } /* LAN ID is needed for sfp_type determination */ @@ -1827,7 +1826,7 @@ static s32 
ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } hw->phy.id = identifier; @@ -1895,7 +1894,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) } else { /* unsupported module type */ hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } } @@ -1955,7 +1954,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) } hw_dbg(hw, "QSFP module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } return 0; } @@ -1966,7 +1965,7 @@ err_read_i2c_eeprom: hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; } /** @@ -1986,14 +1985,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 sfp_type = hw->phy.sfp_type; if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; /* * Limiting active cables and 1G Phys must be initialized as @@ -2014,11 +2013,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { hw_err(hw, "eeprom read at %d failed\n", IXGBE_PHY_INIT_OFFSET_NL); - return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + return -EIO; } if ((!*list_offset) || (*list_offset == 0xFFFF)) - return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + return -EIO; /* Shift offset to first ID word */ (*list_offset)++; @@ -2037,7 +2036,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, goto err_phy; if ((!*data_offset) || (*data_offset == 0xFFFF)) { hw_dbg(hw, "SFP+ module not supported\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } else { break; } @@ -2050,14 +2049,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, if (sfp_id == IXGBE_PHY_INIT_END_NL) { hw_dbg(hw, "No matching SFP+ module found\n"); - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } return 0; err_phy: hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); - return IXGBE_ERR_PHY; + return -EIO; } /** @@ -2152,7 +2151,7 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; ixgbe_i2c_start(hw); @@ -2268,7 +2267,7 @@ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u32 swfw_mask = hw->phy.phy_semaphore_mask; if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; do { ixgbe_i2c_start(hw); @@ -2510,7 +2509,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) if (ack == 1) { hw_dbg(hw, "I2C ack was not received.\n"); - status = IXGBE_ERR_I2C; + status = -EIO; } ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -2582,7 +2581,7 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) udelay(IXGBE_I2C_T_LOW); } else { hw_dbg(hw, "I2C data was not set to %X\n", data); - return IXGBE_ERR_I2C; + return -EIO; } return 0; @@ -2678,7 +2677,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) 
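/*
 * Editorial sketch, not part of the upstream patch: the SFP/QSFP
 * identification hunks above settle on a small errno vocabulary -- -ENOENT
 * when no module is plugged in, -EOPNOTSUPP when a module is present but
 * unsupported, -EBUSY when the SW/FW semaphore cannot be acquired, and -EIO
 * for I2C/EEPROM access failures.  A hypothetical caller (the function name
 * below is invented for illustration) can therefore branch on the standard
 * codes directly, much like the ixgbe_sfp_detection_subtask() changes in
 * ixgbe_main.c earlier in this patch.
 */
static s32 ixgbe_example_handle_sfp(struct ixgbe_hw *hw)
{
	s32 err = hw->phy.ops.identify_sfp(hw);

	if (err == -ENOENT)		/* no module present: not fatal, poll again later */
		return 0;
	if (err == -EOPNOTSUPP)		/* module present but not supported */
		return err;
	return err;			/* 0 on success, -EBUSY/-EIO on access errors */
}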
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); - return IXGBE_ERR_I2C; + return -EIO; } return 0; @@ -2748,22 +2747,24 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Checks if the LASI temp alarm status was triggered due to overtemp + * + * Return true when an overtemp event detected, otherwise false. **/ -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) { u16 phy_data = 0; + u32 status; if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) - return 0; + return false; /* Check that the LASI temp alarm status was triggered */ - hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, - MDIO_MMD_PMAPMD, &phy_data); - - if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) - return 0; + status = hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + MDIO_MMD_PMAPMD, &phy_data); + if (status) + return false; - return IXGBE_ERR_OVERTEMP; + return !!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM); } /** ixgbe_set_copper_phy_power - Control power for copper phy diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 6544c4539c..ef72729d7c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -155,7 +155,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); -s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index ea88ac04ab..9379069c55 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -640,12 +640,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { struct vf_macvlans *entry; - struct list_head *pos; + bool found = false; int retval = 0; if (index <= 1) { - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); + list_for_each_entry(entry, &adapter->vf_mvs.l, l) { if (entry->vf == vf) { entry->vf = -1; entry->free = true; @@ -663,23 +662,22 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!index) return 0; - entry = NULL; - - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free) + list_for_each_entry(entry, &adapter->vf_mvs.l, l) { + if (entry->free) { + found = true; break; + } } /* * If we traversed the entire list and didn't find a free entry - * then we're out of space on the RAR table. Also entry may - * be NULL because the original memory allocation for the list - * failed, which is not fatal but does mean we can't support - * VF requests for MACVLAN because we couldn't allocate - * memory for the list management required. + * then we're out of space on the RAR table. 
It's also possible + * for the &adapter->vf_mvs.l list to be empty because the original + * memory allocation for the list failed, which is not fatal but does + * mean we can't support VF requests for MACVLAN because we couldn't + * allocate memory for the list management required. */ - if (!entry || !entry->free) + if (!found) return -ENOSPC; retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); @@ -1329,7 +1327,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); - retval = IXGBE_ERR_MBX; + retval = -EIO; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2b00db92b0..61b9774b3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3509,10 +3509,10 @@ struct ixgbe_phy_operations { s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); - s32 (*check_overtemp)(struct ixgbe_hw *); + bool (*check_overtemp)(struct ixgbe_hw *); s32 (*set_phy_power)(struct ixgbe_hw *, bool on); s32 (*enter_lplu)(struct ixgbe_hw *); - s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *); s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, u8 *value); s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, @@ -3665,45 +3665,6 @@ struct ixgbe_info { const u32 *mvals; }; - -/* Error Codes */ -#define IXGBE_ERR_EEPROM -1 -#define IXGBE_ERR_EEPROM_CHECKSUM -2 -#define IXGBE_ERR_PHY -3 -#define IXGBE_ERR_CONFIG -4 -#define IXGBE_ERR_PARAM -5 -#define IXGBE_ERR_MAC_TYPE -6 -#define IXGBE_ERR_UNKNOWN_PHY -7 -#define IXGBE_ERR_LINK_SETUP -8 -#define IXGBE_ERR_ADAPTER_STOPPED -9 -#define IXGBE_ERR_INVALID_MAC_ADDR -10 -#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 -#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12 -#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 -#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 -#define IXGBE_ERR_RESET_FAILED -15 -#define IXGBE_ERR_SWFW_SYNC -16 -#define IXGBE_ERR_PHY_ADDR_INVALID -17 -#define IXGBE_ERR_I2C -18 -#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 -#define IXGBE_ERR_SFP_NOT_PRESENT -20 -#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 -#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 -#define IXGBE_ERR_FDIR_REINIT_FAILED -23 -#define IXGBE_ERR_EEPROM_VERSION -24 -#define IXGBE_ERR_NO_SPACE -25 -#define IXGBE_ERR_OVERTEMP -26 -#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 -#define IXGBE_ERR_FC_NOT_SUPPORTED -28 -#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 -#define IXGBE_ERR_PBA_SECTION -31 -#define IXGBE_ERR_INVALID_ARGUMENT -32 -#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 -#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 -#define IXGBE_ERR_FW_RESP_INVALID -39 -#define IXGBE_ERR_TOKEN_RETRY -40 -#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF - #define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) #define IXGBE_FUSES0_REV_MASK (3u << 6) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index d5cfb51ff6..15325c549d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -84,7 +84,7 @@ mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_dbg(hw, "semaphore failed with %d", status); - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } ctrl = IXGBE_CTRL_RST; @@ -103,7 
+103,7 @@ mac_reset_top: } if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; + status = -EIO; hw_dbg(hw, "Reset polling failed to complete.\n"); } msleep(100); @@ -220,7 +220,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = ixgbe_read_eerd_generic(hw, offset, data); @@ -243,7 +243,7 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); @@ -264,7 +264,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = ixgbe_write_eewr_generic(hw, offset, data); @@ -287,7 +287,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); @@ -324,7 +324,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) for (i = 0; i < checksum_last_word; i++) { if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } checksum += word; } @@ -349,7 +349,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } /* Skip pointer section if length is invalid. */ @@ -360,7 +360,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) for (j = pointer + 1; j <= pointer + length; j++) { if (ixgbe_read_eerd_generic(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } checksum += word; } @@ -397,7 +397,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) @@ -418,7 +418,7 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, */ if (read_checksum != checksum) { hw_dbg(hw, "Invalid EEPROM checksum"); - status = IXGBE_ERR_EEPROM_CHECKSUM; + status = -EIO; } /* If the user cares, return the calculated checksum */ @@ -455,7 +455,7 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) @@ -490,7 +490,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) s32 status; status = ixgbe_poll_flash_update_done_X540(hw); - if (status == IXGBE_ERR_EEPROM) { + if (status == -EIO) { hw_dbg(hw, "Flash update time out\n"); return status; } @@ -540,7 +540,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) return 0; udelay(5); } - return IXGBE_ERR_EEPROM; + return -EIO; } /** @@ -575,7 +575,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) 
{ @@ -599,7 +599,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * bits in the SW_FW_SYNC register. */ if (ixgbe_get_swfw_sync_semaphore(hw)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (swfw_sync & (fwmask | hwmask)) { swfw_sync |= swmask; @@ -622,11 +622,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_release_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } /** @@ -680,7 +680,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) if (i == timeout) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); - return IXGBE_ERR_EEPROM; + return -EIO; } /* Now get the semaphore between SW/FW through the REGSMP bit */ @@ -697,7 +697,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) */ hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n"); ixgbe_release_swfw_sync_semaphore(hw); - return IXGBE_ERR_EEPROM; + return -EIO; } /** @@ -768,7 +768,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) bool link_up; if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; /* Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. @@ -804,7 +804,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) u32 ledctl_reg; if (index > 3) - return IXGBE_ERR_PARAM; + return -EINVAL; /* Restore the LED to its default value. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index aa4bf6c9a2..cdc912bba8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -206,13 +206,13 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) } if (retry == IXGBE_CS4227_RETRIES) { hw_err(hw, "CS4227 reset did not complete\n"); - return IXGBE_ERR_PHY; + return -EIO; } status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { hw_err(hw, "CS4227 EEPROM did not load successfully\n"); - return IXGBE_ERR_PHY; + return -EIO; } return 0; @@ -350,13 +350,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; } static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { - return IXGBE_NOT_IMPLEMENTED; + return -EOPNOTSUPP; } /** @@ -463,7 +463,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, --retries; } while (retries > 0); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EIO; } static const struct { @@ -511,7 +511,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) - return IXGBE_ERR_PHY_ADDR_INVALID; + return -EFAULT; hw->phy.autoneg_advertised = hw->phy.speeds_supported; hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | @@ -568,7 +568,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { 
hw_err(hw, "rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } switch (hw->fc.requested_mode) { @@ -600,8 +600,10 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); if (rc) return rc; + if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) - return IXGBE_ERR_OVERTEMP; + return -EIO; + return 0; } @@ -675,7 +677,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) *ctrl = command; if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { hw_dbg(hw, "IOSF wait timed out\n"); - return IXGBE_ERR_PHY; + return -EIO; } return 0; @@ -715,7 +717,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; hw_dbg(hw, "Failed to read, error %x\n", error); - return IXGBE_ERR_PHY; + ret = -EIO; + goto out; } if (!ret) @@ -750,9 +753,9 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return 0; if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) - return IXGBE_ERR_FW_RESP_INVALID; + return -EIO; - return IXGBE_ERR_TOKEN_RETRY; + return -EAGAIN; } /** @@ -778,7 +781,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) return status; if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return 0; - return IXGBE_ERR_FW_RESP_INVALID; + return -EIO; } /** @@ -942,7 +945,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, local_buffer = buf; } else { if (buffer_size < ptr) - return IXGBE_ERR_PARAM; + return -EINVAL; local_buffer = &buffer[ptr]; } @@ -960,7 +963,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, } if (buffer && ((u32)start + (u32)length > buffer_size)) - return IXGBE_ERR_PARAM; + return -EINVAL; for (i = start; length; i++, length--) { if (i == bufsz && !buffer) { @@ -1012,7 +1015,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, local_buffer = eeprom_ptrs; } else { if (buffer_size < IXGBE_EEPROM_LAST_WORD) - return IXGBE_ERR_PARAM; + return -EINVAL; local_buffer = buffer; } @@ -1148,7 +1151,7 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, * calculated checksum */ if (read_checksum != checksum) { - status = IXGBE_ERR_EEPROM_CHECKSUM; + status = -EIO; hw_dbg(hw, "Invalid EEPROM checksum"); } @@ -1203,7 +1206,7 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { hw_dbg(hw, "write ee hostif failed to get semaphore"); - status = IXGBE_ERR_SWFW_SYNC; + status = -EBUSY; } return status; @@ -1415,7 +1418,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; hw_dbg(hw, "Failed to write, error %x\n", error); - return IXGBE_ERR_PHY; + return -EIO; } out: @@ -1558,7 +1561,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) /* iXFI is only supported with X552 */ if (mac->type != ixgbe_mac_X550EM_x) - return IXGBE_ERR_LINK_SETUP; + return -EIO; /* Disable AN and force speed to 10G Serial. */ status = ixgbe_read_iosf_sb_reg_x550(hw, @@ -1580,7 +1583,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) break; default: /* Other link speeds are not supported by internal KR PHY. 
*/ - return IXGBE_ERR_LINK_SETUP; + return -EINVAL; } status = ixgbe_write_iosf_sb_reg_x550(hw, @@ -1611,7 +1614,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) { switch (hw->phy.sfp_type) { case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; + return -ENOENT; case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: *linear = true; @@ -1630,7 +1633,7 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; + return -EOPNOTSUPP; } return 0; @@ -1660,7 +1663,7 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, * there is no reason to configure CS4227 and SFP not present error is * not accepted in the setup MAC link flow. */ - if (status == IXGBE_ERR_SFP_NOT_PRESENT) + if (status == -ENOENT) return 0; if (status) @@ -1718,7 +1721,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) break; default: /* Other link speeds are not supported by internal PHY. */ - return IXGBE_ERR_LINK_SETUP; + return -EINVAL; } (void)mac->ops.write_iosf_sb_reg(hw, @@ -1803,7 +1806,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, /* If no SFP module present, then return success. Return success since * SFP not present error is not excepted in the setup MAC link flow. */ - if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + if (ret_val == -ENOENT) return 0; if (ret_val) @@ -1853,7 +1856,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, /* If no SFP module present, then return success. Return success since * SFP not present error is not excepted in the setup MAC link flow. */ - if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + if (ret_val == -ENOENT) return 0; if (ret_val) @@ -1863,7 +1866,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, ixgbe_setup_kr_speed_x550em(hw, speed); if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE) - return IXGBE_ERR_PHY_ADDR_INVALID; + return -EFAULT; /* Get external PHY SKU id */ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, @@ -1962,7 +1965,7 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, u16 i, autoneg_status; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; + return -EIO; status = ixgbe_check_mac_link_generic(hw, speed, link_up, link_up_wait_to_complete); @@ -2145,9 +2148,9 @@ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, */ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) { - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ixgbe_link_speed speed; + s32 status = -EIO; bool link_up; /* AN should have completed when the cable was plugged in. 
@@ -2165,7 +2168,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) /* Check if auto-negotiation has completed */ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { - status = IXGBE_ERR_FC_NOT_NEGOTIATED; + status = -EIO; goto out; } @@ -2369,18 +2372,18 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, * @hw: pointer to hardware structure * @lsc: pointer to boolean flag which indicates whether external Base T * PHY interrupt is lsc + * @is_overtemp: indicate whether an overtemp event encountered * * Determime if external Base T PHY interrupt cause is high temperature * failure alarm or link status change. - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. **/ -static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) +static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc, + bool *is_overtemp) { u32 status; u16 reg; + *is_overtemp = false; *lsc = false; /* Vendor alarm triggered */ @@ -2412,7 +2415,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { /* power down the PHY in case the PHY FW didn't already */ ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; + *is_overtemp = true; + return -EIO; } if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { /* device fault alarm triggered */ @@ -2426,7 +2430,8 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { /* power down the PHY in case the PHY FW didn't */ ixgbe_set_copper_phy_power(hw, false); - return IXGBE_ERR_OVERTEMP; + *is_overtemp = true; + return -EIO; } } @@ -2462,12 +2467,12 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) **/ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { + bool lsc, overtemp; u32 status; u16 reg; - bool lsc; /* Clear interrupt flags */ - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, &overtemp); /* Enable link status change alarm */ @@ -2546,21 +2551,20 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) /** * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt * @hw: pointer to hardware structure + * @is_overtemp: indicate whether an overtemp event encountered * * Handle external Base T PHY interrupt. If high temperature * failure alarm then return error, else if link status change * then setup internal/external PHY link - * - * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature - * failure alarm, else return PHY access status. 
**/ -static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw, + bool *is_overtemp) { struct ixgbe_phy_info *phy = &hw->phy; bool lsc; u32 status; - status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc, is_overtemp); if (status) return status; @@ -2692,7 +2696,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) u16 speed; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) - return IXGBE_ERR_CONFIG; + return -EIO; if (!(hw->mac.type == ixgbe_mac_X550EM_x && !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { @@ -2735,7 +2739,7 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) break; default: /* Internal PHY does not support anything else */ - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } return ixgbe_setup_ixfi_x550em(hw, &force_speed); @@ -2767,7 +2771,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) u16 phy_data; if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; + return -EINVAL; /* To turn on the LED, set mode to ON. */ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, @@ -2789,7 +2793,7 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) u16 phy_data; if (led_idx >= IXGBE_X557_MAX_LED_INDEX) - return IXGBE_ERR_PARAM; + return -EINVAL; /* To turn on the LED, set mode to ON. */ hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, @@ -2813,8 +2817,9 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * * Sends driver version number to firmware through the manageability * block. On success return 0 - * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring - * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * else returns -EBUSY when encountering an error acquiring + * semaphore, -EIO when command fails or -ENIVAL when incorrect + * params passed. 
**/ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, u16 len, @@ -2825,7 +2830,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, int i; if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) - return IXGBE_ERR_INVALID_ARGUMENT; + return -EINVAL; fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; @@ -2850,7 +2855,7 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, if (fw_cmd.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + return -EIO; return 0; } @@ -2907,7 +2912,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } /* 10gig parts do not have a word in the EEPROM to determine the @@ -2942,7 +2947,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) break; default: hw_err(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } switch (hw->device_id) { @@ -2986,8 +2991,8 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) { u32 link_s1, lp_an_page_low, an_cntl_1; - s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; + s32 status = -EIO; bool link_up; /* AN should have completed when the cable was plugged in. @@ -3013,7 +3018,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { hw_dbg(hw, "Auto-Negotiation did not complete\n"); - status = IXGBE_ERR_FC_NOT_NEGOTIATED; + status = -EIO; goto out; } @@ -3187,21 +3192,23 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) /** * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp * @hw: pointer to hardware structure + * + * Return true when an overtemp event detected, otherwise false. 
*/ -static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) +static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); if (rc) - return rc; + return false; if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ixgbe_shutdown_fw_phy(hw); - return IXGBE_ERR_OVERTEMP; + return true; } - return 0; + return false; } /** @@ -3251,8 +3258,7 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); - if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || - ret_val == IXGBE_ERR_PHY_ADDR_INVALID) + if (ret_val == -EOPNOTSUPP || ret_val == -EFAULT) return ret_val; /* Setup function pointers based on detected hardware */ @@ -3460,8 +3466,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || - status == IXGBE_ERR_PHY_ADDR_INVALID) + if (status == -EOPNOTSUPP || status == -EFAULT) return status; /* start the external PHY */ @@ -3477,7 +3482,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) hw->phy.sfp_setup_needed = false; } - if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + if (status == -EOPNOTSUPP) return status; /* Reset PHY */ @@ -3501,7 +3506,7 @@ mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status) { hw_dbg(hw, "semaphore failed with %d", status); - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; } ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); @@ -3519,7 +3524,7 @@ mac_reset_top: } if (ctrl & IXGBE_CTRL_RST_MASK) { - status = IXGBE_ERR_RESET_FAILED; + status = -EIO; hw_dbg(hw, "Reset polling failed to complete.\n"); } @@ -3615,7 +3620,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - return IXGBE_ERR_INVALID_LINK_SETTINGS; + return -EINVAL; } if (hw->fc.requested_mode == ixgbe_fc_default) @@ -3672,7 +3677,7 @@ static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) break; default: hw_err(hw, "Flow control param set incorrectly\n"); - return IXGBE_ERR_CONFIG; + return -EIO; } status = hw->mac.ops.write_iosf_sb_reg(hw, @@ -3768,7 +3773,7 @@ static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) return 0; if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); - if (status != IXGBE_ERR_TOKEN_RETRY) + if (status != -EAGAIN) return status; msleep(FW_PHY_TOKEN_DELAY); } @@ -3812,7 +3817,7 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); @@ -3838,7 +3843,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, s32 status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) - return IXGBE_ERR_SWFW_SYNC; + return -EBUSY; status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, mask); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 1703c640a4..59798bc332 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -351,7 +351,7 @@ construct_skb: } if (xdp_xmit & IXGBE_XDP_REDIR) - 
xdp_do_flush_map(); + xdp_do_flush(); if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 5f6ae11212..81cf3361a1 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1380,13 +1380,11 @@ static int korina_probe(struct platform_device *pdev) return rc; } -static int korina_remove(struct platform_device *pdev) +static void korina_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); - - return 0; } #ifdef CONFIG_OF @@ -1405,7 +1403,7 @@ static struct platform_driver korina_driver = { .of_match_table = of_match_ptr(korina_match), }, .probe = korina_probe, - .remove = korina_remove, + .remove_new = korina_remove, }; module_platform_driver(korina_driver); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index f5961bdcc4..1d5b7bb638 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -721,8 +721,7 @@ err_out: return err; } -static int -ltq_etop_remove(struct platform_device *pdev) +static void ltq_etop_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -732,11 +731,10 @@ ltq_etop_remove(struct platform_device *pdev) ltq_etop_mdio_cleanup(dev); unregister_netdev(dev); } - return 0; } static struct platform_driver ltq_mii_driver = { - .remove = ltq_etop_remove, + .remove_new = ltq_etop_remove, .driver = { .name = "ltq_etop", }, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 8d646c7f8c..8bd4def362 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -641,7 +641,7 @@ err_uninit_dma: return err; } -static int xrx200_remove(struct platform_device *pdev) +static void xrx200_remove(struct platform_device *pdev) { struct xrx200_priv *priv = platform_get_drvdata(pdev); struct net_device *net_dev = priv->net_dev; @@ -659,8 +659,6 @@ static int xrx200_remove(struct platform_device *pdev) /* shut down hardware */ xrx200_hw_cleanup(priv); - - return 0; } static const struct of_device_id xrx200_match[] = { @@ -671,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match); static struct platform_driver xrx200_driver = { .probe = xrx200_probe, - .remove = xrx200_remove, + .remove_new = xrx200_remove, .driver = { .name = "lantiq,xrx200-net", .of_match_table = xrx200_match, diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c index ffa9605907..5182fe737c 100644 --- a/drivers/net/ethernet/litex/litex_liteeth.c +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -294,13 +294,11 @@ static int liteeth_probe(struct platform_device *pdev) return 0; } -static int liteeth_remove(struct platform_device *pdev) +static void liteeth_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); unregister_netdev(netdev); - - return 0; } static const struct of_device_id liteeth_of_match[] = { @@ -311,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match); static struct platform_driver liteeth_driver = { .probe = liteeth_probe, - .remove = liteeth_remove, + .remove_new = liteeth_remove, .driver = { .name = DRV_NAME, .of_match_table = liteeth_of_match, diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 3b129a1c33..f0bdc06d25 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ 
b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2892,19 +2892,18 @@ err_put_clk: return ret; } -static int mv643xx_eth_shared_remove(struct platform_device *pdev) +static void mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); mv643xx_eth_shared_of_remove(); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); - return 0; } static struct platform_driver mv643xx_eth_shared_driver = { .probe = mv643xx_eth_shared_probe, - .remove = mv643xx_eth_shared_remove, + .remove_new = mv643xx_eth_shared_remove, .driver = { .name = MV643XX_ETH_SHARED_NAME, .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), @@ -3279,7 +3278,7 @@ out: return err; } -static int mv643xx_eth_remove(struct platform_device *pdev) +static void mv643xx_eth_remove(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); struct net_device *dev = mp->dev; @@ -3293,8 +3292,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev) clk_disable_unprepare(mp->clk); free_netdev(mp->dev); - - return 0; } static void mv643xx_eth_shutdown(struct platform_device *pdev) @@ -3311,7 +3308,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) static struct platform_driver mv643xx_eth_driver = { .probe = mv643xx_eth_probe, - .remove = mv643xx_eth_remove, + .remove_new = mv643xx_eth_remove, .shutdown = mv643xx_eth_shutdown, .driver = { .name = MV643XX_ETH_NAME, diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 674913184e..5f66f779e5 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -58,11 +59,6 @@ * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled) */ #define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */ -#define MVMDIO_SMI_POLL_INTERVAL_MIN 45 -#define MVMDIO_SMI_POLL_INTERVAL_MAX 55 - -#define MVMDIO_XSMI_POLL_INTERVAL_MIN 150 -#define MVMDIO_XSMI_POLL_INTERVAL_MAX 160 struct orion_mdio_dev { void __iomem *regs; @@ -84,8 +80,6 @@ enum orion_mdio_bus_type { struct orion_mdio_ops { int (*is_done)(struct orion_mdio_dev *); - unsigned int poll_interval_min; - unsigned int poll_interval_max; }; /* Wait for the SMI unit to be ready for another operation @@ -94,34 +88,23 @@ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops, struct mii_bus *bus) { struct orion_mdio_dev *dev = bus->priv; - unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT); - unsigned long end = jiffies + timeout; - int timedout = 0; + unsigned long timeout; + int done; - while (1) { - if (ops->is_done(dev)) + if (dev->err_interrupt <= 0) { + if (!read_poll_timeout_atomic(ops->is_done, done, done, 2, + MVMDIO_SMI_TIMEOUT, false, dev)) + return 0; + } else { + /* wait_event_timeout does not guarantee a delay of at + * least one whole jiffie, so timeout must be no less + * than two. + */ + timeout = max(usecs_to_jiffies(MVMDIO_SMI_TIMEOUT), 2); + + if (wait_event_timeout(dev->smi_busy_wait, + ops->is_done(dev), timeout)) return 0; - else if (timedout) - break; - - if (dev->err_interrupt <= 0) { - usleep_range(ops->poll_interval_min, - ops->poll_interval_max); - - if (time_is_before_jiffies(end)) - ++timedout; - } else { - /* wait_event_timeout does not guarantee a delay of at - * least one whole jiffie, so timeout must be no less - * than two. 
- */ - if (timeout < 2) - timeout = 2; - wait_event_timeout(dev->smi_busy_wait, - ops->is_done(dev), timeout); - - ++timedout; - } } dev_err(bus->parent, "Timeout: SMI busy for too long\n"); @@ -135,8 +118,6 @@ static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) static const struct orion_mdio_ops orion_mdio_smi_ops = { .is_done = orion_mdio_smi_is_done, - .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN, - .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX, }; static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id, @@ -194,8 +175,6 @@ static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev) static const struct orion_mdio_ops orion_mdio_xsmi_ops = { .is_done = orion_mdio_xsmi_is_done, - .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN, - .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX, }; static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id, @@ -388,7 +367,7 @@ out_clk: return ret; } -static int orion_mdio_remove(struct platform_device *pdev) +static void orion_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct orion_mdio_dev *dev = bus->priv; @@ -404,8 +383,6 @@ static int orion_mdio_remove(struct platform_device *pdev) clk_disable_unprepare(dev->clk[i]); clk_put(dev->clk[i]); } - - return 0; } static const struct of_device_id orion_mdio_match[] = { @@ -426,7 +403,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, - .remove = orion_mdio_remove, + .remove_new = orion_mdio_remove, .driver = { .name = "orion-mdio", .of_match_table = orion_mdio_match, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 165f76d123..29aac32757 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2520,7 +2520,7 @@ next: mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); if (ps.xdp_redirect) - xdp_do_flush_map(); + xdp_do_flush(); if (ps.rx_packets) mvneta_update_stats(pp, &ps); @@ -5737,7 +5737,7 @@ err_free_irq: } /* Device removal routine */ -static int mvneta_remove(struct platform_device *pdev) +static void mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct mvneta_port *pp = netdev_priv(dev); @@ -5756,8 +5756,6 @@ static int mvneta_remove(struct platform_device *pdev) 1 << pp->id); mvneta_bm_put(pp->bm_priv); } - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -5883,7 +5881,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match); static struct platform_driver mvneta_driver = { .probe = mvneta_probe, - .remove = mvneta_remove, + .remove_new = mvneta_remove, .driver = { .name = MVNETA_DRIVER_NAME, .of_match_table = mvneta_match, diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 46c942ef22..3f46a0fed0 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -457,7 +457,7 @@ err_clk: return err; } -static int mvneta_bm_remove(struct platform_device *pdev) +static void mvneta_bm_remove(struct platform_device *pdev) { struct mvneta_bm *priv = platform_get_drvdata(pdev); u8 all_ports_map = 0xff; @@ -475,8 +475,6 @@ static int mvneta_bm_remove(struct platform_device *pdev) mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); clk_disable_unprepare(priv->clk); - - return 0; } static const struct of_device_id mvneta_bm_match[] = { @@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match); static struct 
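As an aside on the mvmdio change above: the open-coded SMI poll loop is replaced by read_poll_timeout_atomic() from <linux/iopoll.h>, which busy-waits on an accessor until its result satisfies a condition or a timeout expires, returning 0 on success or -ETIMEDOUT. A self-contained sketch with hypothetical names (example_is_done and example_wait_ready are not part of the patch):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* stand-in for ops->is_done(dev): test a "done" bit in a status register */
static int example_is_done(void __iomem *status_reg)
{
	return readl(status_reg) & BIT(0);
}

static int example_wait_ready(void __iomem *status_reg)
{
	int done;

	/* poll every 2us, give up after 1000us; 'false' = read before the first delay */
	if (!read_poll_timeout_atomic(example_is_done, done, done,
				      2, 1000, false, status_reg))
		return 0;

	return -ETIMEDOUT;
}
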
platform_driver mvneta_bm_driver = { .probe = mvneta_bm_probe, - .remove = mvneta_bm_remove, + .remove_new = mvneta_bm_remove, .driver = { .name = MVNETA_BM_DRIVER_NAME, .of_match_table = mvneta_bm_match, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index aca17082b9..065f07392c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4052,7 +4052,7 @@ err_drop_frame: } if (xdp_ret & MVPP2_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (ps.rx_packets) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); @@ -5856,7 +5856,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->type = MVPP2_QUEUE_VECTOR_SHARED; if (port->flags & MVPP2_F_DT_COMPAT) - strncpy(irqname, "rx-shared", sizeof(irqname)); + strscpy(irqname, "rx-shared", sizeof(irqname)); } if (port_node) @@ -7687,7 +7687,7 @@ err_pp_clk: return err; } -static int mvpp2_remove(struct platform_device *pdev) +static void mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; @@ -7725,15 +7725,13 @@ static int mvpp2_remove(struct platform_device *pdev) } if (is_acpi_node(port_fwnode)) - return 0; + return; clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(priv->mg_core_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); - - return 0; } static const struct of_device_id mvpp2_match[] = { @@ -7759,7 +7757,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, - .remove = mvpp2_remove, + .remove_new = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c index 90c3a41993..d4ee245467 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -16,9 +16,6 @@ #define CTRL_MBOX_MAX_PF 128 #define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) -#define FW_HB_INTERVAL_IN_SECS 1 -#define FW_HB_MISS_COUNT 10 - /* Names of Hardware non-queue generic interrupts */ static char *cn93_non_ioq_msix_names[] = { "epf_ire_rint", @@ -250,12 +247,11 @@ static void octep_init_config_cn93_pf(struct octep_device *oct) link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); } conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + - (0x400000ull * 7) + + CN93_PEM_BAR4_INDEX_OFFSET + (link * CTRL_MBOX_SZ); - conf->hb_interval = FW_HB_INTERVAL_IN_SECS; - conf->max_hb_miss_cnt = FW_HB_MISS_COUNT; - + conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL; + conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT; } /* Setup registers for a hardware Tx Queue */ @@ -373,34 +369,40 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); } -/* Process non-ioq interrupts required to keep pf interface running. 
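A short note on the strncpy() -> strscpy() conversions in this patch (mvpp2 above, cgx.c further down): strscpy() always NUL-terminates the destination and reports truncation as -E2BIG, whereas strncpy() may leave the buffer unterminated when the source fills it completely. A hedged sketch with a hypothetical helper (example_copy_name is not in the patch):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);	/* copies at most dst_size - 1 bytes */

	if (n == -E2BIG)
		pr_warn("name '%s' truncated to %zu bytes\n", src, dst_size);
}
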
- * OEI_RINT is needed for control mailbox - */ -static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) -{ - bool handled = false; - u64 reg0; - - /* Check for OEI INTR */ - reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); - if (reg0) { - dev_info(&oct->pdev->dev, - "Received OEI_RINT intr: 0x%llx\n", - reg0); - octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0); - if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) +/* Poll OEI events like heartbeat */ +static void octep_poll_oei_cn93_pf(struct octep_device *oct) +{ + u64 reg; + + reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); + if (reg) { + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg); + if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) queue_work(octep_wq, &oct->ctrl_mbox_task); - else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) + else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) atomic_set(&oct->hb_miss_cnt, 0); - - handled = true; } +} + +/* OEI interrupt handler */ +static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + + octep_poll_oei_cn93_pf(oct); + return IRQ_HANDLED; +} - return handled; +/* Process non-ioq interrupts required to keep pf interface running. + * OEI_RINT is needed for control mailbox + */ +static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) +{ + octep_poll_oei_cn93_pf(oct); } -/* Interrupts handler for all non-queue generic interrupts. */ -static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) +/* Interrupt handler for input ring error interrupts. */ +static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev) { struct octep_device *oct = (struct octep_device *)dev; struct pci_dev *pdev = oct->pdev; @@ -425,8 +427,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) reg_val); } } - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for output ring error interrupts. */ +static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; + int i = 0; /* Check for ORERR INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); @@ -444,9 +455,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) reg_val); } } - - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for vf input ring error interrupts. */ +static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for VFIRE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); @@ -454,8 +472,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received VFIRE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for vf output ring error interrupts. 
*/ +static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for VFORE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); @@ -463,19 +489,30 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received VFORE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} - /* Check for MBOX INTR and OEI INTR */ - if (octep_poll_non_ioq_interrupts_cn93_pf(oct)) - goto irq_handled; +/* Interrupt handler for dpi dma related interrupts. */ +static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + u64 reg_val = 0; /* Check for DMA INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); if (reg_val) { octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for dpi dma transaction error interrupts for VFs */ +static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for DMA VF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); @@ -483,8 +520,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for pp transaction error interrupts for VFs */ +static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for PPVF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); @@ -492,8 +537,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received PP_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for mac related interrupts. */ +static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for MISC INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); @@ -501,11 +554,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received MISC_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupts handler for all reserved interrupts. 
*/ +static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); -irq_handled: return IRQ_HANDLED; } @@ -569,8 +628,15 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); + + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); + + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL); } /* Disable all interrupts */ @@ -588,8 +654,15 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); + + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); + + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL); } /* Get new Octeon Read Index: index of descriptor that Octeon reads next. */ @@ -722,7 +795,16 @@ void octep_device_setup_cn93_pf(struct octep_device *oct) oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; - oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; + oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf; + oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf; + oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf; + oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf; + oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf; + oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf; + oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf; + oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf; + oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf; + oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf; oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h index df7cd39d9f..1622a6ebf0 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h @@ -49,6 +49,11 @@ /* Default MTU */ #define OCTEP_DEFAULT_MTU 1500 +/* pf heartbeat interval in milliseconds */ +#define OCTEP_DEFAULT_FW_HB_INTERVAL 1000 +/* pf heartbeat miss count */ +#define OCTEP_DEFAULT_FW_HB_MISS_COUNT 20 + /* Macros to get octeon config params */ #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) #define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) @@ -181,6 +186,16 
@@ struct octep_ctrl_mbox_config { void __iomem *barmem_addr; }; +/* Info from firmware */ +struct octep_fw_info { + /* interface pkind */ + u16 pkind; + /* heartbeat interval in milliseconds */ + u16 hb_interval; + /* heartbeat miss count */ + u16 hb_miss_count; +}; + /* Data Structure to hold configuration limits and active config */ struct octep_config { /* Input Queue attributes. */ @@ -201,10 +216,7 @@ struct octep_config { /* ctrl mbox config */ struct octep_ctrl_mbox_config ctrl_mbox_cfg; - /* Configured maximum heartbeat miss count */ - u32 max_hb_miss_cnt; - - /* Configured firmware heartbeat interval in secs */ - u32 hb_interval; + /* fw info */ + struct octep_fw_info fw_info; }; #endif /* _OCTEP_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index 17bfd5cdf4..0594607a25 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -26,7 +26,7 @@ static atomic_t ctrl_net_msg_id; /* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */ static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = { - [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] = + [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] = OCTEP_CP_VERSION(1, 0, 0) }; @@ -353,6 +353,28 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct) } } +int octep_ctrl_net_get_info(struct octep_device *oct, int vfid, + struct octep_fw_info *info) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_net_h2f_req *req; + int err; + + req = &d.data.req; + init_send_req(&d.msg, req, 0, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_INFO; + req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + resp = &d.data.resp; + memcpy(info, &resp->info.fw_info, sizeof(struct octep_fw_info)); + + return 0; +} + int octep_ctrl_net_uninit(struct octep_device *oct) { struct octep_ctrl_net_wait_data *pos, *n; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h index 1c2ef4ee31..b330f37013 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h @@ -41,6 +41,7 @@ enum octep_ctrl_net_h2f_cmd { OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, OCTEP_CTRL_NET_H2F_CMD_RX_STATE, OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, + OCTEP_CTRL_NET_H2F_CMD_GET_INFO, OCTEP_CTRL_NET_H2F_CMD_MAX }; @@ -161,6 +162,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state { u16 state; }; +/* get info request */ +struct octep_ctrl_net_h2f_resp_cmd_get_info { + struct octep_fw_info fw_info; +}; + /* Host to fw response data */ struct octep_ctrl_net_h2f_resp { union octep_ctrl_net_resp_hdr hdr; @@ -171,6 +177,7 @@ struct octep_ctrl_net_h2f_resp { struct octep_ctrl_net_h2f_resp_cmd_state link; struct octep_ctrl_net_h2f_resp_cmd_state rx; struct octep_ctrl_net_link_info link_info; + struct octep_ctrl_net_h2f_resp_cmd_get_info info; }; } __packed; @@ -330,6 +337,17 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct, */ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct); +/** Get info from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param info: non-null pointer to struct octep_fw_info. 
+ * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_get_info(struct octep_device *oct, int vfid, + struct octep_fw_info *info); + /** Uninitialize data for ctrl net. * * @param oct: non-null pointer to struct octep_device. diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 2ee1374db4..a9bdf3283a 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -155,18 +155,153 @@ static void octep_disable_msix(struct octep_device *oct) } /** - * octep_non_ioq_intr_handler() - common handler for all generic interrupts. + * octep_oei_intr_handler() - common handler for output endpoint interrupts. * * @irq: Interrupt number. * @data: interrupt data. * - * this is common handler for all non-queue (generic) interrupts. + * this is common handler for all output endpoint interrupts. */ -static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) +static irqreturn_t octep_oei_intr_handler(int irq, void *data) { struct octep_device *oct = data; - return oct->hw_ops.non_ioq_intr_handler(oct); + return oct->hw_ops.oei_intr_handler(oct); +} + +/** + * octep_ire_intr_handler() - common handler for input ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for input ring error interrupts. + */ +static irqreturn_t octep_ire_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.ire_intr_handler(oct); +} + +/** + * octep_ore_intr_handler() - common handler for output ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for output ring error interrupts. + */ +static irqreturn_t octep_ore_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.ore_intr_handler(oct); +} + +/** + * octep_vfire_intr_handler() - common handler for vf input ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for vf input ring error interrupts. + */ +static irqreturn_t octep_vfire_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.vfire_intr_handler(oct); +} + +/** + * octep_vfore_intr_handler() - common handler for vf output ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for vf output ring error interrupts. + */ +static irqreturn_t octep_vfore_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.vfore_intr_handler(oct); +} + +/** + * octep_dma_intr_handler() - common handler for dpi dma related interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for dpi dma related interrupts. + */ +static irqreturn_t octep_dma_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.dma_intr_handler(oct); +} + +/** + * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for dpi dma transaction error interrupts for VFs. 
+ */ +static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.dma_vf_intr_handler(oct); +} + +/** + * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for pp transaction error interrupts for VFs. + */ +static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.pp_vf_intr_handler(oct); +} + +/** + * octep_misc_intr_handler() - common handler for mac related interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for mac related interrupts. + */ +static irqreturn_t octep_misc_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.misc_intr_handler(oct); +} + +/** + * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use). + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for all reserved interrupts. + */ +static irqreturn_t octep_rsvd_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.rsvd_intr_handler(oct); } /** @@ -222,9 +357,57 @@ static int octep_request_irqs(struct octep_device *oct) snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, "%s-%s", netdev->name, non_ioq_msix_names[i]); - ret = request_irq(msix_entry->vector, - octep_non_ioq_intr_handler, 0, - irq_name, oct); + if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint", + strlen("epf_oei_rint"))) { + ret = request_irq(msix_entry->vector, + octep_oei_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint", + strlen("epf_ire_rint"))) { + ret = request_irq(msix_entry->vector, + octep_ire_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint", + strlen("epf_ore_rint"))) { + ret = request_irq(msix_entry->vector, + octep_ore_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint", + strlen("epf_vfire_rint"))) { + ret = request_irq(msix_entry->vector, + octep_vfire_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint", + strlen("epf_vfore_rint"))) { + ret = request_irq(msix_entry->vector, + octep_vfore_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint", + strlen("epf_dma_rint"))) { + ret = request_irq(msix_entry->vector, + octep_dma_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint", + strlen("epf_dma_vf_rint"))) { + ret = request_irq(msix_entry->vector, + octep_dma_vf_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint", + strlen("epf_pp_vf_rint"))) { + ret = request_irq(msix_entry->vector, + octep_pp_vf_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint", + strlen("epf_misc_rint"))) { + ret = request_irq(msix_entry->vector, + octep_misc_intr_handler, 0, + irq_name, oct); + } else { + ret = request_irq(msix_entry->vector, + octep_rsvd_intr_handler, 0, + irq_name, oct); + } + if (ret) { netdev_err(netdev, "request_irq failed for %s; err=%d", @@ -917,9 +1100,9 @@ static void octep_hb_timeout_task(struct work_struct *work) int miss_cnt; miss_cnt = atomic_inc_return(&oct->hb_miss_cnt); - if (miss_cnt < oct->conf->max_hb_miss_cnt) { + if (miss_cnt < 
oct->conf->fw_info.hb_miss_count) { queue_delayed_work(octep_wq, &oct->hb_task, - msecs_to_jiffies(oct->conf->hb_interval * 1000)); + msecs_to_jiffies(oct->conf->fw_info.hb_interval)); return; } @@ -1010,10 +1193,16 @@ int octep_device_setup(struct octep_device *oct) if (ret) return ret; + INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task); + INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task); + oct->poll_non_ioq_intr = true; + queue_delayed_work(octep_wq, &oct->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + atomic_set(&oct->hb_miss_cnt, 0); INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task); - queue_delayed_work(octep_wq, &oct->hb_task, - msecs_to_jiffies(oct->conf->hb_interval * 1000)); + return 0; unsupported_dev: @@ -1143,12 +1332,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Device setup failed\n"); goto err_octep_config; } - INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); - INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); - INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task); - octep_dev->poll_non_ioq_intr = true; - queue_delayed_work(octep_wq, &octep_dev->intr_poll_task, - msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + + err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, + &octep_dev->conf->fw_info); + if (err) { + dev_err(&pdev->dev, "Failed to get firmware info\n"); + goto register_dev_err; + } + dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n", + octep_dev->conf->fw_info.hb_interval, + octep_dev->conf->fw_info.hb_miss_count); + queue_delayed_work(octep_wq, &octep_dev->hb_task, + msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval)); netdev->netdev_ops = &octep_netdev_ops; octep_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index e0907a7191..6df902ebb7 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -65,7 +65,16 @@ struct octep_hw_ops { void (*setup_oq_regs)(struct octep_device *oct, int q); void (*setup_mbox_regs)(struct octep_device *oct, int mbox); - irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*oei_intr_handler)(void *ioq_vector); + irqreturn_t (*ire_intr_handler)(void *ioq_vector); + irqreturn_t (*ore_intr_handler)(void *ioq_vector); + irqreturn_t (*vfire_intr_handler)(void *ioq_vector); + irqreturn_t (*vfore_intr_handler)(void *ioq_vector); + irqreturn_t (*dma_intr_handler)(void *ioq_vector); + irqreturn_t (*dma_vf_intr_handler)(void *ioq_vector); + irqreturn_t (*pp_vf_intr_handler)(void *ioq_vector); + irqreturn_t (*misc_intr_handler)(void *ioq_vector); + irqreturn_t (*rsvd_intr_handler)(void *ioq_vector); irqreturn_t (*ioq_intr_handler)(void *ioq_vector); int (*soft_reset)(struct octep_device *oct); void (*reinit_regs)(struct octep_device *oct); @@ -73,7 +82,7 @@ struct octep_hw_ops { void (*enable_interrupts)(struct octep_device *oct); void (*disable_interrupts)(struct octep_device *oct); - bool (*poll_non_ioq_interrupts)(struct octep_device *oct); + void (*poll_non_ioq_interrupts)(struct octep_device *oct); void (*enable_io_queues)(struct octep_device *oct); void (*disable_io_queues)(struct octep_device *oct); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h 
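The octep heartbeat rework above keeps the same delayed-work watchdog shape; only the interval now comes from the firmware in milliseconds rather than a fixed number of seconds. A minimal sketch of that pattern with hypothetical names (struct example_dev and example_hb_timeout are not part of the patch):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work	hb_task;
	atomic_t		hb_miss_cnt;
	u16			hb_interval;	/* milliseconds, reported by firmware */
	u16			hb_miss_count;	/* misses tolerated before giving up */
};

static void example_hb_timeout(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       hb_task.work);

	if (atomic_inc_return(&dev->hb_miss_cnt) < dev->hb_miss_count) {
		/* still within budget: check again one interval from now */
		schedule_delayed_work(&dev->hb_task,
				      msecs_to_jiffies(dev->hb_interval));
		return;
	}

	/* too many missed beats: report the device unresponsive here */
}
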
b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h index b25c3093dc..0a43983e91 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -370,4 +370,8 @@ /* bit 1 for firmware heartbeat interrupt */ #define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1) +#define CN93_PEM_BAR4_INDEX 7 +#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL +#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE) + #endif /* _OCTEP_REGS_CN9K_PF_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h index 782a24f27f..49feae80d7 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -20,6 +20,7 @@ struct octep_oq_desc_hw { dma_addr_t buffer_ptr; u64 info_ptr; }; +static_assert(sizeof(struct octep_oq_desc_hw) == 16); #define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw)) @@ -39,6 +40,7 @@ struct octep_oq_resp_hw_ext { /* checksum verified. */ u64 csum_verified:2; }; +static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8); #define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext)) @@ -50,6 +52,7 @@ struct octep_oq_resp_hw { /* The Length of the packet. */ __be64 length; }; +static_assert(sizeof(struct octep_oq_resp_hw) == 8); #define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw)) diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h index 21e75ff9f5..86c98b13fc 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -36,6 +36,7 @@ struct octep_tx_sglist_desc { u16 len[4]; dma_addr_t dma_ptr[4]; }; +static_assert(sizeof(struct octep_tx_sglist_desc) == 40); /* Each Scatter/Gather entry sent to hardwar hold four pointers. * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' @@ -239,6 +240,7 @@ struct octep_instr_hdr { /* Reserved3 */ u64 reserved3:1; }; +static_assert(sizeof(struct octep_instr_hdr) == 8); /* Hardware Tx completion response header */ struct octep_instr_resp_hdr { @@ -263,6 +265,7 @@ struct octep_instr_resp_hdr { /* Opcode for the return packet */ u64 opcode:16; }; +static_assert(sizeof(struct octep_instr_hdr) == 8); /* 64-byte Tx instruction format. * Format of instruction for a 64-byte mode input queue. @@ -293,6 +296,7 @@ struct octep_tx_desc_hw { /* Additional headers available in a 64-byte instruction. 
*/ u64 exhdr[4]; }; +static_assert(sizeof(struct octep_tx_desc_hw) == 64); #define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw)) #endif /* _OCTEP_TX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index e06f77ad61..6c70c84986 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1218,8 +1218,6 @@ static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) { - const char *lmac_string; - linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; @@ -1230,12 +1228,12 @@ static inline void link_status_user_format(u64 lstat, if (linfo->lmac_type_id >= LMAC_MODE_MAX) { dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", linfo->lmac_type_id, cgx->cgx_id, lmac_id); - strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1); + strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type)); return; } - lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; - strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); + strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id], + sizeof(linfo->lmac_type)); } /* Hardware event handlers */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 31bd9aeb41..5df42634ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1473,6 +1473,12 @@ struct flow_msg { u8 next_header; }; __be16 vlan_itci; +#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12) +#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9) +#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8) +#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0) +#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8) + u32 mpls_lse[4]; }; struct npc_install_flow_req { @@ -1574,7 +1580,7 @@ enum ptp_op { PTP_OP_GET_CLOCK = 1, PTP_OP_GET_TSTMP = 2, PTP_OP_SET_THRESH = 3, - PTP_OP_EXTTS_ON = 4, + PTP_OP_PPS_ON = 4, PTP_OP_ADJTIME = 5, PTP_OP_SET_CLOCK = 6, }; @@ -1584,7 +1590,8 @@ struct ptp_req { u8 op; s64 scaled_ppm; u64 thresh; - int extts_on; + u64 period; + int pps_on; s64 delta; u64 clk; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index c92c3f4631..8c0732c9a7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -206,6 +206,14 @@ enum key_fields { NPC_SPORT_SCTP, NPC_DPORT_SCTP, NPC_IPSEC_SPI, + NPC_MPLS1_LBTCBOS, + NPC_MPLS1_TTL, + NPC_MPLS2_LBTCBOS, + NPC_MPLS2_TTL, + NPC_MPLS3_LBTCBOS, + NPC_MPLS3_TTL, + NPC_MPLS4_LBTCBOS, + NPC_MPLS4_TTL, NPC_HEADER_FIELDS_MAX, NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */ NPC_PF_FUNC, /* Valid when Tx */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index ffbd227971..bcc96eed24 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -46,6 +46,7 @@ #define PTP_PPS_HI_INCR 0xF60ULL #define PTP_PPS_LO_INCR 0xF68ULL +#define PTP_PPS_THRESH_LO 0xF50ULL #define PTP_PPS_THRESH_HI 0xF58ULL #define PTP_CLOCK_LO 0xF08ULL @@ -411,29 +412,12 @@ void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts) } clock_cfg |= 
PTP_CLOCK_CFG_PTP_EN; - clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; clock_cfg |= (ATOMIC_SET << 26); writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); - /* Set 50% duty cycle for 1Hz output */ - writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); - writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); - if (cn10k_ptp_errata(ptp)) { - /* The ptp_clock_hi rollsover to zero once clock cycle before it - * reaches one second boundary. so, program the pps_lo_incr in - * such a way that the pps threshold value comparison at one - * second boundary will succeed and pps edge changes. After each - * one second boundary, the hrtimer handler will be invoked and - * reprograms the pps threshold value. - */ - ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate; - writeq((0x1dcd6500ULL - ptp->clock_period) << 32, - ptp->reg_base + PTP_PPS_LO_INCR); - } - if (cn10k_ptp_errata(ptp)) clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate); else @@ -465,20 +449,68 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh) return 0; } -static int ptp_extts_on(struct ptp *ptp, int on) +static int ptp_config_hrtimer(struct ptp *ptp, int on) { u64 ptp_clock_hi; - if (cn10k_ptp_errata(ptp)) { - if (on) { - ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); - ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi); - } else { - if (hrtimer_active(&ptp->hrtimer)) - hrtimer_cancel(&ptp->hrtimer); + if (on) { + ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); + ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi); + } else { + if (hrtimer_active(&ptp->hrtimer)) + hrtimer_cancel(&ptp->hrtimer); + } + + return 0; +} + +static int ptp_pps_on(struct ptp *ptp, int on, u64 period) +{ + u64 clock_cfg; + + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + if (on) { + if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) { + dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n"); + return -EINVAL; } + + if (period > (8 * NSEC_PER_SEC)) { + dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n"); + return -EINVAL; + } + + clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + + writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI); + writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO); + + /* Configure high/low phase time */ + period = period / 2; + writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR); + writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR); + } else { + clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV); + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); } + if (on && cn10k_ptp_errata(ptp)) { + /* The ptp_clock_hi rollsover to zero once clock cycle before it + * reaches one second boundary. so, program the pps_lo_incr in + * such a way that the pps threshold value comparison at one + * second boundary will succeed and pps edge changes. After each + * one second boundary, the hrtimer handler will be invoked and + * reprograms the pps threshold value. 
+ */ + ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate; + writeq((0x1dcd6500ULL - ptp->clock_period) << 32, + ptp->reg_base + PTP_PPS_LO_INCR); + } + + if (cn10k_ptp_errata(ptp)) + ptp_config_hrtimer(ptp, on); + return 0; } @@ -613,8 +645,8 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_SET_THRESH: err = ptp_set_thresh(rvu->ptp, req->thresh); break; - case PTP_OP_EXTTS_ON: - err = ptp_extts_on(rvu->ptp, req->extts_on); + case PTP_OP_PPS_ON: + err = ptp_pps_on(rvu->ptp, req->pps_on, req->period); break; case PTP_OP_ADJTIME: ptp_atomic_adjtime(rvu->ptp, req->delta); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index ce987ccd43..38acdc7a73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -773,12 +773,11 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; - /* This msg is expected only from PFs that are mapped to CGX LMACs, + /* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs, * if received from other PF/VF simply ACK, nothing to do. */ - if ((pcifunc & RVU_PFVF_FUNC_MASK) || - !is_pf_cgxmapped(rvu, pf)) - return -ENODEV; + if (!is_pf_cgxmapped(rvu, pf)) + return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index d30e848034..bd817ee887 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2756,6 +2756,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); +#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \ +do { \ + seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \ + seq_printf(s, "mask 0x%lx\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \ +} while (0) \ + +#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \ +do { \ + typeof(_pkt) (pkt) = (_pkt); \ + typeof(_mask) (mask) = (_mask); \ + seq_printf(s, "%ld %ld %ld\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \ + seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \ +} while (0) \ + static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { @@ -2836,6 +2857,38 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "0x%x ", ntohl(rule->packet.spi)); seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi)); break; + case NPC_MPLS1_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0], + rule->mask.mpls_lse[0]); + break; + case NPC_MPLS1_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0], + rule->mask.mpls_lse[0]); + break; + case NPC_MPLS2_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1], + rule->mask.mpls_lse[1]); + break; + case NPC_MPLS2_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1], + rule->mask.mpls_lse[1]); + break; + case NPC_MPLS3_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2], + rule->mask.mpls_lse[2]); + break; + case NPC_MPLS3_TTL: + 
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2], + rule->mask.mpls_lse[2]); + break; + case NPC_MPLS4_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3], + rule->mask.mpls_lse[3]); + break; + case NPC_MPLS4_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3], + rule->mask.mpls_lse[3]); + break; default: seq_puts(s, "\n"); break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index bffe04e6d0..21b5d71c1e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -14,26 +14,16 @@ #define DRV_NAME "octeontx2-af" -static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) +static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) { - int err; - - err = devlink_fmsg_pair_nest_start(fmsg, name); - if (err) - return err; - - return devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_pair_nest_start(fmsg, name); + devlink_fmsg_obj_nest_start(fmsg); } -static int rvu_report_pair_end(struct devlink_fmsg *fmsg) +static void rvu_report_pair_end(struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static bool rvu_common_request_irq(struct rvu *rvu, int offset, @@ -284,175 +274,81 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx, { struct rvu_nix_event_ctx *nix_event_context; u64 intr_val; - int err; nix_event_context = ctx; switch (health_reporter) { case NIX_AF_RVU_INTR: intr_val = nix_event_context->nix_af_rvu_int; - err = rvu_report_pair_start(fmsg, "NIX_AF_RVU"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", - nix_event_context->nix_af_rvu_int); - if (err) - return err; - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_RVU"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", + nix_event_context->nix_af_rvu_int); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_GEN: intr_val = nix_event_context->nix_af_rvu_gen; - err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", - nix_event_context->nix_af_rvu_gen); - if (err) - return err; - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", + nix_event_context->nix_af_rvu_gen); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); + rvu_report_pair_end(fmsg); break; 
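For context on the MPLS debugfs output above: each 32-bit label stack entry is unpacked with FIELD_GET() and the OTX2_FLOWER_MASK_MPLS_* masks introduced in mbox.h earlier in this patch. A standalone sketch (example_print_mpls_lse is hypothetical; the mask definitions are copied verbatim from the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define OTX2_FLOWER_MASK_MPLS_LB	GENMASK(31, 12)
#define OTX2_FLOWER_MASK_MPLS_TC	GENMASK(11, 9)
#define OTX2_FLOWER_MASK_MPLS_BOS	BIT(8)
#define OTX2_FLOWER_MASK_MPLS_TTL	GENMASK(7, 0)

static void example_print_mpls_lse(u32 lse)
{
	pr_info("label %lu tc %lu bos %lu ttl %lu\n",
		FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, lse),
		FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, lse),
		FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, lse),
		FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, lse));
}
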
case NIX_AF_RVU_ERR: intr_val = nix_event_context->nix_af_rvu_err; - err = rvu_report_pair_start(fmsg, "NIX_AF_ERR"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", - nix_event_context->nix_af_rvu_err); - if (err) - return err; - if (intr_val & BIT_ULL(14)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(13)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); - if (err) - return err; - } - if (intr_val & BIT_ULL(12)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); - if (err) - return err; - } - if (intr_val & BIT_ULL(6)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); - if (err) - return err; - } - if (intr_val & BIT_ULL(5)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(3)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(2)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); - if (err) - return err; - } - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_ERR"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + if (intr_val & BIT_ULL(14)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); + if (intr_val & BIT_ULL(13)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); + if (intr_val & BIT_ULL(12)) + devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + if (intr_val & BIT_ULL(6)) + devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); + if (intr_val & BIT_ULL(5)) + devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); + if (intr_val & BIT_ULL(3)) + devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); + if (intr_val & BIT_ULL(2)) + devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); + rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_RAS: intr_val = nix_event_context->nix_af_rvu_err; - err = rvu_report_pair_start(fmsg, "NIX_AF_RAS"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", - nix_event_context->nix_af_rvu_err); - if (err) - return err; - err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); - if (err) - return err; - if (intr_val & BIT_ULL(34)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); - if (err) - return err; - } - if (intr_val & BIT_ULL(33)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); - if (err) - return err; - } - if (intr_val & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err 
= devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(3)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); - - if (err) - return err; - } - if (intr_val & BIT_ULL(2)) { - err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_RAS"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); + if (intr_val & BIT_ULL(34)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); + if (intr_val & BIT_ULL(33)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); + if (intr_val & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); + if (intr_val & BIT_ULL(3)) + devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); + if (intr_val & BIT_ULL(2)) + devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); + rvu_report_pair_end(fmsg); break; default: return -EINVAL; @@ -919,181 +815,87 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx, struct rvu_npa_event_ctx *npa_event_context; unsigned int alloc_dis, free_dis; u64 intr_val; - int err; npa_event_context = ctx; switch (health_reporter) { case NPA_AF_RVU_GEN: intr_val = npa_event_context->npa_af_rvu_gen; - err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", - npa_event_context->npa_af_rvu_gen); - if (err) - return err; - if (intr_val & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); - if (err) - return err; - } + rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", + npa_event_context->npa_af_rvu_gen); + if (intr_val & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); free_dis = FIELD_GET(GENMASK(15, 0), intr_val); - if (free_dis & BIT(NPA_INPQ_NIX0_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX0_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX1_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX1_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_SSO)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_TIM)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_DPI)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); - if (err) - 
return err; - } - if (free_dis & BIT(NPA_INPQ_AURA_OP)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); - if (err) - return err; - } + if (free_dis & BIT(NPA_INPQ_NIX0_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); + if (free_dis & BIT(NPA_INPQ_NIX0_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); + if (free_dis & BIT(NPA_INPQ_NIX1_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); + if (free_dis & BIT(NPA_INPQ_NIX1_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); + if (free_dis & BIT(NPA_INPQ_SSO)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); + if (free_dis & BIT(NPA_INPQ_TIM)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); + if (free_dis & BIT(NPA_INPQ_DPI)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); + if (free_dis & BIT(NPA_INPQ_AURA_OP)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val); - if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_SSO)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_TIM)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_DPI)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); + if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); + if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); + if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); + if (alloc_dis & BIT(NPA_INPQ_SSO)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); + if (alloc_dis & BIT(NPA_INPQ_TIM)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); + if (alloc_dis & BIT(NPA_INPQ_DPI)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); + if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); + + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_ERR: - err = rvu_report_pair_start(fmsg, "NPA_AF_ERR"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", - npa_event_context->npa_af_rvu_err); - if (err) - return err; - - if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on 
NPA_AQ_RES_S write"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NPA_AF_ERR"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", + npa_event_context->npa_af_rvu_err); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write"); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) + devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_RAS: - err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", - npa_event_context->npa_af_rvu_ras); - if (err) - return err; - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", + npa_event_context->npa_af_rvu_ras); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_INTR: - err = rvu_report_pair_start(fmsg, "NPA_AF_RVU"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", - npa_event_context->npa_af_rvu_int); - if (err) - return err; - if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); - if (err) - return err; - } - return rvu_report_pair_end(fmsg); + rvu_report_pair_start(fmsg, "NPA_AF_RVU"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", + npa_event_context->npa_af_rvu_int); + if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + rvu_report_pair_end(fmsg); + break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 0bcf3e5592..55639c133d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -437,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, return; } + /* AF modifies given action iff PF/VF has requested for it */ + if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT) + return; + /* copy VF default entry action to the VF mcam entry */ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, target_func); @@ -2678,18 +2682,17 @@ int 
rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, rsp->entry = NPC_MCAM_ENTRY_INVALID; rsp->free_count = 0; - /* Check if ref_entry is within range */ - if (req->priority && req->ref_entry >= mcam->bmap_entries) { - dev_err(rvu->dev, "%s: reference entry %d is out of range\n", - __func__, req->ref_entry); - return NPC_MCAM_INVALID_REQ; - } + /* Check if ref_entry is greater that the range + * then set it to max value. + */ + if (req->ref_entry > mcam->bmap_entries) + req->ref_entry = mcam->bmap_entries; /* ref_entry can't be '0' if requested priority is high. * Can't be last entry if requested priority is low. */ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || - ((req->ref_entry == (mcam->bmap_entries - 1)) && + ((req->ref_entry == mcam->bmap_entries) && req->priority == NPC_MCAM_LOWER_PRIO)) return NPC_MCAM_INVALID_REQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 237f82082e..114e4ec218 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -43,6 +43,14 @@ static const char * const npc_flow_names[] = { [NPC_DPORT_SCTP] = "sctp destination port", [NPC_LXMB] = "Mcast/Bcast header ", [NPC_IPSEC_SPI] = "SPI ", + [NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos", + [NPC_MPLS1_TTL] = "lse depth 1 ttl", + [NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos", + [NPC_MPLS2_TTL] = "lse depth 2 ttl", + [NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos", + [NPC_MPLS3_TTL] = "lse depth 3 ttl", + [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos", + [NPC_MPLS4_TTL] = "lse depth 4", [NPC_UNKNOWN] = "unknown", }; @@ -528,6 +536,14 @@ do { \ NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4); NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4); + NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3); + NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1); + NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3); + NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1); + NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3); + NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1); + NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3); + NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1); /* SMAC follows the DMAC(which is 6 bytes) */ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6); @@ -593,6 +609,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) /* for L2M/L2B/L3M/L3B, check if the type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf)) *features |= BIT_ULL(NPC_LXMB); + + for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) { + if (npc_check_field(rvu, blkaddr, hdr, intf)) + *features |= BIT_ULL(hdr); + } } /* Scan key extraction profile and record how fields of our interest @@ -959,6 +980,47 @@ do { \ NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0, ntohs(mask->vlan_itci), 0); + NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[0]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[0]), 0); + NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[0]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[0]), 0); + NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[1]), 0, + 
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[1]), 0); + NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[1]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[1]), 0); + NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[2]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[2]), 0); + NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[2]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[2]), 0); + NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[3]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[3]), 0); + NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[3]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[3]), 0); + NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0, mask->next_header, 0); npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 629cf1659e..02d0b707ae 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); - if (err) + if (err) { + kfree(sq->sg); + sq->sg = NULL; return err; + } } sq->head = 0; @@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + if (err) { + kfree(sq->sg); + sq->sg = NULL; + return err; + } + + return 0; } @@ -1399,7 +1409,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, } pp_params.order = get_order(buf_size); - pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; + pp_params.flags = PP_FLAG_DMA_MAP; pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); pp_params.nid = NUMA_NO_NODE; pp_params.dev = pfvf->dev; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 53f6258a97..8b7fc0af91 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev, pfvf->hw.tx_queues = channel->tx_count; if (pfvf->xdp_prog) pfvf->hw.xdp_queues = channel->rx_count; - pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues; if (if_up) err = dev->netdev_ops->ndo_open(dev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index a57455aebf..e5fe67e738 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev) /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. 
*/ + pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues; pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, pf->hw.tc_tx_queues); @@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) xdp_features_clear_redirect_target(dev); } - pf->hw.non_qos_queues += pf->hw.xdp_queues; - if (if_up) otx2_open(pf->netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 3a72b0793d..63130ba37e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -175,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) return otx2_sync_mbox_msg(&ptp->nic->mbox); } -static int ptp_extts_on(struct otx2_ptp *ptp, int on) +static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period) { struct ptp_req *req; @@ -186,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on) if (!req) return -ENOMEM; - req->op = PTP_OP_EXTTS_ON; - req->extts_on = on; + req->op = PTP_OP_PPS_ON; + req->pps_on = on; + req->period = period; return otx2_sync_mbox_msg(&ptp->nic->mbox); } @@ -276,8 +277,8 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, switch (func) { case PTP_PF_NONE: case PTP_PF_EXTTS: - break; case PTP_PF_PEROUT: + break; case PTP_PF_PHYSYNC: return -1; } @@ -340,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); + u64 period = 0; int pin; if (!ptp->nic) @@ -351,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, rq->extts.index); if (pin < 0) return -EBUSY; - if (on) { - ptp_extts_on(ptp, on); + if (on) schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); - } else { - ptp_extts_on(ptp, on); + else cancel_delayed_work_sync(&ptp->extts_work); + + return 0; + case PTP_CLK_REQ_PEROUT: + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (rq->perout.index >= ptp_info->n_pins) + return -EINVAL; + if (on) { + period = rq->perout.period.sec * NSEC_PER_SEC + + rq->perout.period.nsec; + ptp_pps_on(ptp, on, period); + } else { + ptp_pps_on(ptp, on, period); } return 0; default: @@ -411,6 +425,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf) .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, .n_ext_ts = 1, + .n_per_out = 1, .n_pins = 1, .pps = 0, .pin_config = &ptp_ptr->extts_config, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 423ce54eae..db1e0e0e81 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -27,6 +27,8 @@ #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) +#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4) + struct otx2_tc_flow_stats { u64 bytes; u64 pkts; @@ -538,6 +540,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_IPSEC) | + BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { netdev_info(nic->netdev, "unsupported flow used key 0x%llx", dissector->used_keys); @@ -757,6 +760,61 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_match_mpls match; + u8 bit; + + 
flow_rule_match_mpls(rule, &match); + + if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported LSE depth for MPLS match offload"); + return -EOPNOTSUPP; + } + + for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses, + FLOW_DIS_MPLS_MAX) { + /* check if any of the fields LABEL,TC,BOS are set */ + if (*((u32 *)&match.mask->ls[bit]) & + OTX2_FLOWER_MASK_MPLS_NON_TTL) { + /* Hardware will capture 4 byte MPLS header into + * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL. + * Derive the associated NPC key based on header + * index and offset. + */ + + req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS + + 2 * bit); + flow_spec->mpls_lse[bit] = + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, + match.key->ls[bit].mpls_label) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, + match.key->ls[bit].mpls_tc) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, + match.key->ls[bit].mpls_bos); + + flow_mask->mpls_lse[bit] = + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, + match.mask->ls[bit].mpls_label) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, + match.mask->ls[bit].mpls_tc) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, + match.mask->ls[bit].mpls_bos); + } + + if (match.mask->ls[bit].mpls_ttl) { + req->features |= BIT_ULL(NPC_MPLS1_TTL + + 2 * bit); + flow_spec->mpls_lse[bit] |= + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, + match.key->ls[bit].mpls_ttl); + flow_mask->mpls_lse[bit] |= + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, + match.mask->ls[bit].mpls_ttl); + } + } + } + return otx2_tc_parse_actions(nic, &rule->action, req, f, node); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 4d519ea833..f828d32737 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, bool *need_xdp_flush) { - unsigned char *hard_start, *data; + unsigned char *hard_start; int qidx = cq->cq_idx; struct xdp_buff xdp; struct page *page; @@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); - data = (unsigned char *)phys_to_virt(pa); - hard_start = page_address(page); - xdp_prepare_buff(&xdp, hard_start, data - hard_start, + hard_start = (unsigned char *)phys_to_virt(pa); + xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM, cqe->sg.seg_size, false); act = bpf_prog_run_xdp(prog, &xdp); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d5691b6a2b..dd6ca2e4fd 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1528,7 +1528,7 @@ err_clk: return err; } -static int pxa168_eth_remove(struct platform_device *pdev) +static void pxa168_eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct pxa168_eth_private *pep = netdev_priv(dev); @@ -1547,7 +1547,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) mdiobus_free(pep->smi_bus); unregister_netdev(dev); free_netdev(dev); - return 0; } static void pxa168_eth_shutdown(struct platform_device *pdev) @@ -1580,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); static struct platform_driver pxa168_eth_driver = { .probe = pxa168_eth_probe, - .remove = pxa168_eth_remove, + .remove_new = pxa168_eth_remove, .shutdown = pxa168_eth_shutdown, .resume = pxa168_eth_resume, .suspend = pxa168_eth_suspend, 
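
The pxa168_eth hunk above, and the mtk_eth_soc hunk that follows, are part of the tree-wide move of platform drivers from the int-returning .remove callback to the void-returning .remove_new callback: the remove path can no longer report failure, so the trailing "return 0;" is dropped and the struct member is switched. A minimal sketch of the resulting shape, using a hypothetical foo driver rather than any code from this patch:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver, shown only to illustrate the .remove_new pattern. */
static int foo_probe(struct platform_device *pdev)
{
        /* acquire resources, register the device, etc. */
        return 0;
}

/* .remove_new returns void: teardown is not allowed to fail, so nothing is propagated. */
static void foo_remove(struct platform_device *pdev)
{
        /* undo everything done in foo_probe(); no return value */
}

static struct platform_driver foo_driver = {
        .probe      = foo_probe,
        .remove_new = foo_remove,
        .driver = {
                .name = "foo",
        },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

Both conversions in this patch are mechanical: the teardown logic is unchanged apart from dropping the return value.
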
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 20afe79f38..d2c039f830 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -197,6 +197,7 @@ static const struct mtk_reg_map mt7988_reg_map = { .wdma_base = { [0] = 0x4800, [1] = 0x4c00, + [2] = 0x5000, }, .pse_iq_sta = 0x0180, .pse_oq_sta = 0x01a0, @@ -2210,7 +2211,7 @@ rx_done: net_dim(ð->rx_dim, dim_sample); if (xdp_flush) - xdp_do_flush_map(); + xdp_do_flush(); return done; } @@ -3328,7 +3329,7 @@ static int mtk_device_event(struct notifier_block *n, unsigned long event, void return NOTIFY_DONE; found: - if (!dsa_slave_dev_check(dev)) + if (!dsa_user_dev_check(dev)) return NOTIFY_DONE; if (__ethtool_get_link_ksettings(dev, &s)) @@ -4757,7 +4758,10 @@ static int mtk_probe(struct platform_device *pdev) } if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)); + if (!err) + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { dev_err(&pdev->dev, "Wrong DMA config\n"); return -EINVAL; @@ -5002,7 +5006,7 @@ err_destroy_sgmii: return err; } -static int mtk_remove(struct platform_device *pdev) +static void mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); struct mtk_mac *mac; @@ -5024,8 +5028,6 @@ static int mtk_remove(struct platform_device *pdev) netif_napi_del(ð->rx_napi); mtk_cleanup(eth); mtk_mdio_cleanup(eth); - - return 0; } static const struct mtk_soc_data mt2701_data = { @@ -5226,7 +5228,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match); static struct platform_driver mtk_driver = { .probe = mtk_probe, - .remove = mtk_remove, + .remove_new = mtk_remove, .driver = { .name = "mtk_soc_eth", .of_match_table = of_mtk_match, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 403219d987..9ae3b8a71d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1132,7 +1132,7 @@ struct mtk_reg_map { u32 gdm1_cnt; u32 gdma_to_ppe; u32 ppe_base; - u32 wdma_base[2]; + u32 wdma_base[3]; u32 pse_iq_sta; u32 pse_oq_sta; }; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 86f32f4860..b2a5d9c373 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -425,7 +425,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, } int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, - int wdma_idx, int txq, int bss, int wcid) + int wdma_idx, int txq, int bss, int wcid, + bool amsdu_en) { struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); u32 *ib2 = mtk_foe_entry_ib2(eth, entry); @@ -437,6 +438,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, MTK_FOE_IB2_WDMA_WINFO_V2; l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) | FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss); + l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en); break; case 2: *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index e3d0ec72bc..691806bca3 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -88,13 +88,13 @@ enum { #define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16) #define MTK_FOE_WINFO_WCID_V3 
GENMASK(15, 0) -#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0) -#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16) -#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20) -#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21) -#define MTK_FOE_WINFO_PAO_IS_SP BIT(22) -#define MTK_FOE_WINFO_PAO_HF BIT(23) -#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24) +#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0) +#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16) +#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20) +#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21) +#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22) +#define MTK_FOE_WINFO_AMSDU_HF BIT(23) +#define MTK_FOE_WINFO_AMSDU_EN BIT(24) enum { MTK_FOE_STATE_INVALID, @@ -123,7 +123,7 @@ struct mtk_foe_mac_info { /* netsys_v3 */ u32 w3info; - u32 wpao; + u32 amsdu; }; /* software-only entry type */ @@ -392,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, int sid); int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, - int wdma_idx, int txq, int bss, int wcid); + int wdma_idx, int txq, int bss, int wcid, + bool amsdu_en); int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, unsigned int queue); int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index a4efbeb162..fbb5e9d5af 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i info->queue = path->mtk_wdma.queue; info->bss = path->mtk_wdma.bss; info->wcid = path->mtk_wdma.wcid; + info->amsdu = path->mtk_wdma.amsdu; return 0; } @@ -174,7 +175,7 @@ mtk_flow_get_dsa_port(struct net_device **dev) if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) return -ENODEV; - *dev = dsa_port_to_master(dp); + *dev = dsa_port_to_conduit(dp); return dp->index; #else @@ -192,14 +193,17 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue, - info.bss, info.wcid); + info.bss, info.wcid, info.amsdu); if (mtk_is_netsys_v2_or_greater(eth)) { switch (info.wdma_idx) { case 0: - pse_port = 8; + pse_port = PSE_WDMA0_PORT; break; case 1: - pse_port = 9; + pse_port = PSE_WDMA1_PORT; + break; + case 2: + pse_port = PSE_WDMA2_PORT; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 94376aa2b3..9a6744c0d4 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -17,17 +17,21 @@ #include #include #include "mtk_eth_soc.h" -#include "mtk_wed_regs.h" #include "mtk_wed.h" #include "mtk_ppe.h" #include "mtk_wed_wo.h" #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) -#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_PKT_SIZE 1920 #define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_PAGE_BUF_SIZE 128 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) +#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) #define MTK_WED_RX_RING_SIZE 1536 +#define MTK_WED_RX_PG_BM_CNT 8192 +#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) +#define MTK_WED_AMSDU_NPAGES 32 #define MTK_WED_TX_RING_SIZE 2048 #define MTK_WED_WDMA_RING_SIZE 1024 @@ -41,7 +45,10 @@ #define 
MTK_WED_RRO_QUE_CNT 8192 #define MTK_WED_MIOD_ENTRY_CNT 128 -static struct mtk_wed_hw *hw_list[2]; +#define MTK_WED_TX_BM_DMA_SIZE 65536 +#define MTK_WED_TX_BM_PKT_CNT 32768 + +static struct mtk_wed_hw *hw_list[3]; static DEFINE_MUTEX(hw_lock); struct mtk_wed_flow_block_priv { @@ -49,6 +56,39 @@ struct mtk_wed_flow_block_priv { struct net_device *dev; }; +static const struct mtk_wed_soc_data mt7622_data = { + .regmap = { + .tx_bm_tkid = 0x088, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(3, 0), + .reset_idx_rx_mask = GENMASK(17, 16), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = sizeof(struct mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7986_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7988_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x7d0, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), + .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), +}; + static void wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) { @@ -109,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev) return wdma_r32(dev, MTK_WDMA_GLO_CFG); } +static void +mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + 
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); +} + static int mtk_wdma_rx_reset(struct mtk_wed_device *dev) { @@ -121,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) if (ret) dev_err(dev->hw->dev, "rx reset failed\n"); + mtk_wdma_v3_rx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -135,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) return ret; } +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return !!(wed_r32(dev, reg) & mask); +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev, reg, mask); +} + +static void +mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + 
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); +} + static void mtk_wdma_tx_reset(struct mtk_wed_device *dev) { @@ -146,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev) !(status & mask), 0, 10000)) dev_err(dev->hw->dev, "tx reset failed\n"); + mtk_wdma_v3_tx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -278,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev) if (!hw->wed_dev) goto out; - if (hw->version == 1) + if (mtk_wed_is_v1(hw)) return NULL; /* MT7986 WED devices do not have any pcie slot restrictions */ @@ -297,36 +518,153 @@ out: return hw; } +static int +mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw = dev->hw; + struct mtk_wed_amsdu *wed_amsdu; + int i; + + if (!mtk_wed_is_v3_or_greater(hw)) + return 0; + + wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, + sizeof(*wed_amsdu), GFP_KERNEL); + if (!wed_amsdu) + return -ENOMEM; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + void *ptr; + + /* each segment is 64K */ + ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_COMP | + GFP_DMA32, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + if (!ptr) + goto error; + + wed_amsdu[i].txd = ptr; + wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, + MTK_WED_AMSDU_BUF_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) + goto error; + } + dev->hw->wed_amsdu = wed_amsdu; + + return 0; + +error: + for (i--; i >= 0; i--) + dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + return -ENOMEM; +} + +static void +mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i; + + if (!wed_amsdu) + return; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + free_pages((unsigned long)wed_amsdu[i].txd, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + } +} + +static int +mtk_wed_amsdu_init(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i, ret; + + if (!wed_amsdu) + return 0; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) + wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), + wed_amsdu[i].txd_phy); + + /* init all sta parameter */ + wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | + MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, + dev->wlan.amsdu_max_len >> 8) | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, + dev->wlan.amsdu_max_subframes)); + + wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); + + ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, + MTK_WED_AMSDU_STA_INFO_DO_INIT); + if (ret) { + 
dev_err(dev->hw->dev, "amsdu initialization failed\n"); + return ret; + } + + /* init partial amsdu offload txd src */ + wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, + FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); + + /* init qmem */ + wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); + ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); + if (ret) { + pr_info("%s: amsdu qmem initialization failed\n", __func__); + return ret; + } + + /* eagle E1 PCIE1 tx ring 22 flow control issue */ + if (dev->wlan.id == 0x7991) + wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); + + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + + return 0; +} + static int mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; - void **page_list; + u32 desc_size = dev->hw->soc->tx_ring_desc_size; + int i, page_idx = 0, n_pages, ring_size; int token = dev->wlan.token_start; - int ring_size; - int n_pages; - int i, page_idx; + struct mtk_wed_buf *page_list; + dma_addr_t desc_phys; + void *desc_ptr; - ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); - n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->tx_buf_ring.size = ring_size; + } else { + dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; + ring_size = MTK_WED_TX_BM_PKT_CNT; + } + n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); if (!page_list) return -ENOMEM; - dev->tx_buf_ring.size = ring_size; dev->tx_buf_ring.pages = page_list; - desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), - &desc_phys, GFP_KERNEL); - if (!desc) + desc_ptr = dma_alloc_coherent(dev->hw->dev, + dev->tx_buf_ring.size * desc_size, + &desc_phys, GFP_KERNEL); + if (!desc_ptr) return -ENOMEM; - dev->tx_buf_ring.desc = desc; + dev->tx_buf_ring.desc = desc_ptr; dev->tx_buf_ring.desc_phys = desc_phys; - for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { dma_addr_t page_phys, buf_phys; struct page *page; void *buf; @@ -343,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) return -ENOMEM; } - page_list[page_idx++] = page; + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, DMA_BIDIRECTIONAL); @@ -351,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) buf_phys = page_phys; for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { - u32 txd_size; - u32 ctrl; - - txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + struct mtk_wdma_desc *desc = desc_ptr; desc->buf0 = cpu_to_le32(buf_phys); - desc->buf1 = cpu_to_le32(buf_phys + txd_size); - - if (dev->hw->version == 1) - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG1; - else - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG0; - desc->ctrl = cpu_to_le32(ctrl); - desc->info = 0; - desc++; - + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + u32 txd_size, ctrl; + + txd_size = dev->wlan.init_buf(buf, buf_phys, + token++); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); + if (mtk_wed_is_v1(dev->hw)) + ctrl |= 
MTK_WDMA_DESC_CTRL_LAST_SEG1 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size); + else + ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, + MTK_WED_BUF_SIZE - txd_size); + desc->ctrl = cpu_to_le32(ctrl); + desc->info = 0; + } else { + desc->ctrl = cpu_to_le32(token << 16); + } + + desc_ptr += desc_size; buf += MTK_WED_BUF_SIZE; buf_phys += MTK_WED_BUF_SIZE; } @@ -387,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) static void mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; - void **page_list = dev->tx_buf_ring.pages; - int page_idx; - int i; + struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; + struct mtk_wed_hw *hw = dev->hw; + int i, page_idx = 0; if (!page_list) return; - if (!desc) + if (!dev->tx_buf_ring.desc) goto free_pagelist; - for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; - i += MTK_WED_BUF_PER_PAGE) { - void *page = page_list[page_idx++]; - dma_addr_t buf_addr; + for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phy = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; if (!page) break; - buf_addr = le32_to_cpu(desc[i].buf0); - dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(page); } - dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), - desc, dev->tx_buf_ring.desc_phys); + dma_free_coherent(dev->hw->dev, + dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, + dev->tx_buf_ring.desc, + dev->tx_buf_ring.desc_phys); free_pagelist: kfree(page_list); } +static int +mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) +{ + int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; + struct mtk_wed_buf *page_list; + struct mtk_wed_bm_desc *desc; + dma_addr_t desc_phys; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return 0; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->hw_rro.pages = page_list; + desc = dma_alloc_coherent(dev->hw->dev, + dev->wlan.rx_nbuf * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->hw_rro.desc = desc; + dev->hw_rro.desc_phys = desc_phys; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + int s; + + page = __dev_alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf_phys = page_phys; + for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { + desc->buf0 = cpu_to_le32(buf_phys); + buf_phys += MTK_WED_PAGE_BUF_SIZE; + desc++; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + static int mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc; + struct mtk_wed_bm_desc *desc; dma_addr_t desc_phys; dev->rx_buf_ring.size = dev->wlan.rx_nbuf; @@ -436,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) dev->rx_buf_ring.desc_phys = desc_phys; dev->wlan.init_rx_buf(dev, 
dev->wlan.rx_npkt); - return 0; + return mtk_wed_hwrro_buffer_alloc(dev); +} + +static void +mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_buf *page_list = dev->hw_rro.pages; + struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t buf_addr = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; + + if (!page) + break; + + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), + desc, dev->hw_rro.desc_phys); + +free_pagelist: + kfree(page_list); } static void mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; + struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; if (!desc) return; @@ -450,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) dev->wlan.release_rx_buf(dev); dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), desc, dev->rx_buf_ring.desc_phys); + + mtk_wed_hwrro_free_buffer(dev); +} + +static void +mtk_wed_hwrro_init(struct mtk_wed_device *dev) +{ + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, + FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); + + wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); + + wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, + MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | + FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, + MTK_WED_RX_PG_BM_CNT)); + + /* enable rx_page_bm to fetch dmad */ + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); } static void @@ -463,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); + + mtk_wed_hwrro_init(dev); } static void @@ -498,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) { u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; - if (dev->hw->version == 1) + switch (dev->hw->version) { + case 1: mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; - else + break; + case 2: mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + break; + case 3: + mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + break; + default: + break; + } if (!dev->hw->num_flows) mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; @@ -516,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) static void mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) { + if (!mtk_wed_is_v2(dev->hw)) + return; + if (enable) { wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); wed_w32(dev, MTK_WED_TXP_DW1, @@ -527,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) } } -#define MTK_WFMDA_RX_DMA_EN BIT(2) -static void -mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) +static int +mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, + struct mtk_wed_ring *ring) { - u32 val; int i; - if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) - return; /* queue is not configured by mt76 */ - for (i = 0; i < 3; i++) { - u32 cur_idx; + u32 cur_idx = readl(ring->wpdma + 
MTK_WED_RING_OFS_CPU_IDX); - cur_idx = wed_r32(dev, - MTK_WED_WPDMA_RING_RX_DATA(idx) + - MTK_WED_RING_OFS_CPU_IDX); if (cur_idx == MTK_WED_RX_RING_SIZE - 1) break; @@ -551,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) if (i == 3) { dev_err(dev->hw->dev, "rx dma enable failed\n"); - return; + return -ETIMEDOUT; } - val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | - MTK_WFMDA_RX_DMA_EN; - wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); + return 0; } static void @@ -577,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); @@ -590,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WED_WPDMA_RX_D_RX_DRV_EN); wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + if (mtk_wed_is_v3_or_greater(dev->hw) && + mtk_wed_get_rx_capa(dev)) { + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, + MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, + MTK_WDMA_PREF_RX_CFG_PREF_EN); + } } mtk_wed_set_512_support(dev, false); @@ -606,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev) wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); - if (dev->hw->version == 1) + if (!mtk_wed_get_rx_capa(dev)) return; wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); @@ -625,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN | MTK_WED_CTRL_WED_RX_BM_EN | MTK_WED_CTRL_RX_RRO_QM_EN); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); + wed_clr(dev, MTK_WED_PCIE_INT_CTRL, + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); + } } static void @@ -643,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev) mtk_wdma_rx_reset(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_amsdu_free_buffer(dev); mtk_wed_free_tx_buffer(dev); mtk_wed_free_tx_rings(dev); @@ -681,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev) mutex_unlock(&hw_lock); } -#define PCIE_BASE_ADDR0 0x11280000 static void mtk_wed_bus_init(struct mtk_wed_device *dev) { switch (dev->wlan.bus_type) { case MTK_WED_BUS_PCIE: { struct device_node *np = dev->hw->eth->dev->of_node; - struct regmap *regs; - regs = syscon_regmap_lookup_by_phandle(np, - "mediatek,wed-pcie"); - if (IS_ERR(regs)) - break; + if (mtk_wed_is_v2(dev->hw)) { + struct regmap *regs; + + regs = syscon_regmap_lookup_by_phandle(np, + "mediatek,wed-pcie"); + if (IS_ERR(regs)) + break; - regmap_update_bits(regs, 0, BIT(0), BIT(0)); + regmap_update_bits(regs, 0, BIT(0), BIT(0)); + } + + if (dev->wlan.msi) { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0xc08); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0xc04); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); + } else { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0x180); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0x184); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); + } wed_w32(dev, MTK_WED_PCIE_INT_CTRL, FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); @@ -703,19 +1202,9 @@ 
mtk_wed_bus_init(struct mtk_wed_device *dev) /* pcie interrupt control: pola/source selection */ wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | - FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); - wed_r32(dev, MTK_WED_PCIE_INT_CTRL); - - wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); - wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); - - /* pcie interrupt status trigger register */ - wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); - wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); - - /* pola setting */ - wed_set(dev, MTK_WED_PCIE_INT_CTRL, - MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, + dev->hw->index)); break; } case MTK_WED_BUS_AXI: @@ -731,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) static void mtk_wed_set_wpdma(struct mtk_wed_device *dev) { - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); - } else { - mtk_wed_bus_init(dev); + int i; - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); - wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); - wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); - wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); + return; } + + mtk_wed_bus_init(dev); + + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); + wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx); + + if (!dev->wlan.hw_rro) + return; + + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), + dev->wlan.wpdma_rx_pg + i * 0x10); } static void mtk_wed_hw_init_early(struct mtk_wed_device *dev) { - u32 mask, set; + u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); + u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; mtk_wed_deinit(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); mtk_wed_set_wpdma(dev); - mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | - MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; - set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | - MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + } wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 offset = dev->hw->index ? 
0x04000400 : 0; wdma_set(dev, MTK_WDMA_GLO_CFG, @@ -907,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) } /* configure RX_ROUTE_QM */ - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + if (mtk_wed_is_v2(dev->hw)) { + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); + wed_set(dev, MTK_WED_RTQM_GLO_CFG, + FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } else { + wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, + FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + } /* enable RX_ROUTE_QM */ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); } @@ -924,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->init_done = true; mtk_wed_set_ext_int(dev, false); - wed_w32(dev, MTK_WED_TX_BM_CTRL, - MTK_WED_TX_BM_CTRL_PAUSE | - FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, - dev->tx_buf_ring.size / 128) | - FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, - MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); - wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_TX_BM_TKID, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | MTK_WED_TX_BM_DYN_THR_HI); - } else { - wed_w32(dev, MTK_WED_TX_BM_TKID_V2, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + } else if (mtk_wed_is_v2(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | + MTK_WED_TX_TKID_DYN_THR_HI); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | MTK_WED_TX_BM_DYN_THR_HI_V2); @@ -961,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->tx_buf_ring.size / 128) | FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, dev->tx_buf_ring.size / 128)); - wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, - FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | - MTK_WED_TX_TKID_DYN_THR_HI); } + wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + dev->wlan.nbuf - 1)); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - if (dev->hw->version == 1) { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* switch to new bm architecture */ + wed_clr(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_LEGACY_EN); + + wed_w32(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, + dev->wlan.nbuf / 128) | + 
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, + dev->wlan.nbuf / 128)); + /* return SKBID + SDP back to bm */ + wed_set(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_FREE_FORMAT); + + wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, + MTK_WED_TX_BM_PKT_CNT | + MTK_WED_TX_BM_INIT_SW_TAIL_IDX); + } + + if (mtk_wed_is_v1(dev->hw)) { wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - } else { - wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); + } else if (mtk_wed_get_rx_capa(dev)) { /* rx hw init */ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); + /* reset prefetch index of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + /* reset prefetch FIFO of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); + wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); + mtk_wed_rx_buffer_hw_init(dev); mtk_wed_rro_hw_init(dev); mtk_wed_route_qm_hw_init(dev); } wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); + if (!mtk_wed_is_v1(dev->hw)) + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); } static void @@ -1008,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) } } -static u32 -mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - return !!(wed_r32(dev, reg) & mask); -} - -static int -mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - int sleep = 15000; - int timeout = 100 * sleep; - u32 val; - - return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, - timeout, false, dev, reg, mask); -} - static int mtk_wed_rx_reset(struct mtk_wed_device *dev) { @@ -1038,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) return ret; + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, + MTK_WED_RX_IND_CMD_BUSY); + mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); + } + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); + if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. 
disable prefetch HW */ + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, + MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); + } + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); @@ -1072,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); } + if (dev->wlan.hw_rro) { + /* disable rro msdu page drv */ + wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + /* disable rro data drv */ + wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + + /* rro msdu page drv reset */ + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + /* rro data drv reset */ + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + } + /* reset route qm */ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_BUSY); - if (ret) + if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); - else - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - MTK_WED_RTQM_Q_RST); + } else if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); + wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); + } else { + wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } /* reset tx wdma */ mtk_wdma_tx_reset(dev); /* reset tx wdma drv */ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); - mtk_wed_poll_busy(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); + if (mtk_wed_is_v3_or_greater(dev->hw)) + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, + MTK_WED_WPDMA_STATUS_TX_DRV); + else + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); /* reset wed rx dma */ @@ -1098,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); } else { - struct mtk_eth *eth = dev->hw->eth; - - if (mtk_is_netsys_v2_or_greater(eth)) - wed_set(dev, MTK_WED_RESET_IDX, - MTK_WED_RESET_IDX_RX_V2); - else - wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); + wed_set(dev, MTK_WED_RESET_IDX, + dev->hw->soc->regmap.reset_idx_rx_mask); wed_w32(dev, MTK_WED_RESET_IDX, 0); } @@ -1114,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_RX_BM_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_RX_PG_BM_BUSY); + wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + } + /* wo change to enable state */ val = MTK_WED_WO_STATE_ENABLE; ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, @@ -1131,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) false); } mtk_wed_free_rx_buffer(dev); + mtk_wed_hwrro_free_buffer(dev); return 0; } @@ -1157,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); } else { - wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); + wed_w32(dev, MTK_WED_RESET_IDX, + dev->hw->soc->regmap.reset_idx_tx_mask); 
wed_w32(dev, MTK_WED_RESET_IDX, 0); } /* 2. reset WDMA rx DMA */ busy = !!mtk_wdma_rx_reset(dev); - wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | + wed_r32(dev, MTK_WED_WDMA_GLO_CFG); + val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; + wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); + } else { + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + } + if (!busy) busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); + if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. disable prefetch HW */ + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + + /* 2. Reset dma index */ + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX_ALL); + } + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); @@ -1187,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); for (i = 0; i < 100; i++) { - val = wed_r32(dev, MTK_WED_TX_BM_INTF); - if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + if (mtk_wed_is_v1(dev->hw)) + val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_BM_INTF)); + else + val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_TKID_INTF)); + if (val == 0x40) break; } @@ -1210,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_w32(dev, MTK_WED_RX1_CTRL2, 0); } else { wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, MTK_WED_WPDMA_RESET_IDX_TX | @@ -1218,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) } dev->init_done = false; - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; if (!busy) { @@ -1226,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RESET_IDX, 0); } - mtk_wed_rx_reset(dev); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* reset amsdu engine */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); + } + + if (mtk_wed_get_rx_capa(dev)) + mtk_wed_rx_reset(dev); } static int @@ -1249,7 +1875,6 @@ static int mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->rx_wdma)) @@ -1257,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->rx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, @@ -1278,7 +1903,6 @@ static int mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 desc_size = sizeof(struct mtk_wdma_desc) * 
dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->tx_wdma)) @@ -1286,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->tx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; + if (mtk_wed_is_v3_or_greater(dev->hw)) { + struct mtk_wdma_desc *desc = wdma->desc; + int i; + + for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); + desc++; + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); + desc++; + } + } + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, @@ -1344,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); @@ -1354,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); } else { - wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, - GENMASK(1, 0)); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); + /* initail tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | @@ -1374,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, dev->wlan.txfree_tbit)); - wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, - MTK_WED_WPDMA_INT_CTRL_RX0_EN | - MTK_WED_WPDMA_INT_CTRL_RX0_CLR | - MTK_WED_WPDMA_INT_CTRL_RX1_EN | - MTK_WED_WPDMA_INT_CTRL_RX1_CLR | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, - dev->wlan.rx_tbit[0]) | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, - dev->wlan.rx_tbit[1])); + if (mtk_wed_get_rx_capa(dev)) { + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, + MTK_WED_WPDMA_INT_CTRL_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, + dev->wlan.rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, + dev->wlan.rx_tbit[1])); + + wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, + GENMASK(1, 0)); + } wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); wed_set(dev, MTK_WED_WDMA_INT_CTRL, @@ -1398,55 +2046,280 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_w32(dev, MTK_WED_INT_MASK, irq_mask); } +#define MTK_WFMDA_RX_DMA_EN BIT(2) static void mtk_wed_dma_enable(struct mtk_wed_device *dev) { - wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + int i; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); + wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); + } else { + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); + } wed_set(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN | MTK_WED_GLO_CFG_RX_DMA_EN); - wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); - wdma_set(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - } else { - int i; + return; + } - wed_set(dev, MTK_WED_WPDMA_CTRL, - MTK_WED_WPDMA_CTRL_SDL1_FIXED); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); - wed_set(dev, MTK_WED_WDMA_GLO_CFG, - MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | - MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + } - wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, - MTK_WED_WPDMA_RX_D_RX_DRV_EN | - FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | - FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, - 0x2)); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + } + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_ring[i]; + u32 val; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; /* queue is not configured by mt76 */ + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { + dev_err(dev->hw->dev, + "rx_ring(%d) dma enable failed\n", i); + continue; + } + + val = wifi_r32(dev, + dev->wlan.wpdma_rx_glo - + dev->wlan.phy_base) | 
MTK_WFMDA_RX_DMA_EN; + wifi_w32(dev, + dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, + val); + } +} + +static void +mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) +{ + int i; + + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); + + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + if (reset) { + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + return; + } + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, + dev->wlan.rro_rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, + dev->wlan.rro_rx_tbit[1])); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, + dev->wlan.rx_pg_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, + dev->wlan.rx_pg_tbit[1]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, + dev->wlan.rx_pg_tbit[2])); + + /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after + * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken + */ + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_rro_ring(%d) initialization failed\n", i); + } + + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_page_ring(%d) initialization failed\n", i); + } +} + +static void +mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, + void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static void +mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static int +mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->ind_cmd_ring; + u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); + int i, count = 0; - for (i = 0; i < MTK_WED_RX_QUEUES; i++) - mtk_wed_check_wfdma_rx_fill(dev, i); + ring->wpdma = regs; + wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, + 
readl(regs) & 0xfffffff0); + + wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + + /* ack sn cr */ + wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + + dev->wlan.ind_cmd.ack_sn_addr); + wed_w32(dev, MTK_WED_RRO_CFG1, + FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, + dev->wlan.ind_cmd.win_size) | + FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, + dev->wlan.ind_cmd.particular_sid)); + + /* particular session addr element */ + wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, + dev->wlan.ind_cmd.particular_se_phys); + + for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { + wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, + dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); + wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, + MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); + + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "write ba session base failed\n"); + } + + /* pn check init */ + for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { + wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, + MTK_WED_PN_CHECK_IS_FIRST); + + wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | + FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); + + count = 0; + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "session(%d) initialization failed\n", i); } + + wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + + return 0; } static void @@ -1466,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) mtk_wed_set_ext_int(dev, true); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); val |= BIT(0) | (BIT(1) * !!dev->hw->index); regmap_write(dev->hw->mirror, dev->hw->index * 4, val); - } else { + } else if (mtk_wed_get_rx_capa(dev)) { /* driver set mid ready and only once */ wed_w32(dev, MTK_WED_EXT_INT_MASK1, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); @@ -1483,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) wed_r32(dev, MTK_WED_EXT_INT_MASK1); wed_r32(dev, MTK_WED_EXT_INT_MASK2); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_w32(dev, MTK_WED_EXT_INT_MASK3, + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); + wed_r32(dev, MTK_WED_EXT_INT_MASK3); + } + if (mtk_wed_rro_cfg(dev)) return; - } mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + mtk_wed_amsdu_init(dev); mtk_wed_dma_enable(dev); dev->running = true; @@ -1535,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev) dev->irq = hw->irq; dev->wdma_idx = hw->index; dev->version = hw->version; + dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); if (hw->eth->dma_dev == hw->eth->dev && of_dma_is_coherent(hw->eth->dev->of_node)) @@ -1544,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev) if (ret) goto out; + ret = mtk_wed_amsdu_buffer_alloc(dev); + if (ret) + goto out; + if (mtk_wed_get_rx_capa(dev)) { ret = mtk_wed_rro_alloc(dev); if (ret) @@ -1551,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev) } mtk_wed_hw_init_early(dev); - if (hw->version == 1) { + if (mtk_wed_is_v1(hw)) regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); - } else { + else dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); + + if 
(mtk_wed_get_rx_capa(dev)) ret = mtk_wed_wo_init(hw); - } out: if (ret) { dev_err(dev->hw->dev, "failed to attach wed device\n"); @@ -1601,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, ring->reg_base = MTK_WED_RING_TX(idx); ring->wpdma = regs; + if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { + /* reset prefetch index */ + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + /* reset prefetch FIFO */ + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, + MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | + MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); + } + /* WED -> WPDMA */ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); @@ -1619,7 +2521,7 @@ static int mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->txfree_ring; - int i, index = dev->hw->version == 1; + int i, index = mtk_wed_is_v1(dev->hw); /* * For txfree event handling, the same DMA ring is shared between WED @@ -1675,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, static u32 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) { - u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + u32 val, ext_mask; - if (dev->hw->version == 1) - ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; + if (mtk_wed_is_v3_or_greater(dev->hw)) + ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; else - ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | - MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | - MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | - MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); @@ -1713,19 +2613,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) int mtk_wed_flow_add(int index) { struct mtk_wed_hw *hw = hw_list[index]; - int ret; + int ret = 0; - if (!hw || !hw->wed_dev) - return -ENODEV; + mutex_lock(&hw_lock); - if (hw->num_flows) { - hw->num_flows++; - return 0; + if (!hw || !hw->wed_dev) { + ret = -ENODEV; + goto out; } - mutex_lock(&hw_lock); - if (!hw->wed_dev) { - ret = -ENODEV; + if (!hw->wed_dev->wlan.offload_enable) + goto out; + + if (hw->num_flows) { + hw->num_flows++; goto out; } @@ -1744,14 +2645,15 @@ void mtk_wed_flow_remove(int index) { struct mtk_wed_hw *hw = hw_list[index]; - if (!hw) - return; + mutex_lock(&hw_lock); - if (--hw->num_flows) - return; + if (!hw || !hw->wed_dev) + goto out; - mutex_lock(&hw_lock); - if (!hw->wed_dev) + if (!hw->wed_dev->wlan.offload_disable) + goto out; + + if (--hw->num_flows) goto out; hw->wed_dev->wlan.offload_disable(hw->wed_dev); @@ -1842,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, { struct mtk_wed_hw *hw = wed->hw; - if (hw->version < 2) + if (mtk_wed_is_v1(hw)) return -EOPNOTSUPP; switch (type) { @@ -1874,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, .detach = mtk_wed_detach, .ppe_check = mtk_wed_ppe_check, .setup_tc = mtk_wed_setup_tc, + .start_hw_rro = mtk_wed_start_hw_rro, + .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup, + .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup, + 
.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup, }; struct device_node *eth_np = eth->dev->of_node; struct platform_device *pdev; @@ -1916,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw->wdma = wdma; hw->index = index; hw->irq = irq; - hw->version = mtk_is_netsys_v1(eth) ? 1 : 2; + hw->version = eth->soc->version; - if (hw->version == 1) { + switch (hw->version) { + case 2: + hw->soc = &mt7986_data; + break; + case 3: + hw->soc = &mt7988_data; + break; + default: + case 1: hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, "mediatek,pcie-mirror"); hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, @@ -1932,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, regmap_write(hw->mirror, 0, 0); regmap_write(hw->mirror, 4, 0); } + hw->soc = &mt7622_data; + break; } mtk_wed_hw_add_debugfs(hw); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h index 43ab77eaf6..c1f0479d7a 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -9,10 +9,29 @@ #include #include +#include "mtk_wed_regs.h" + struct mtk_eth; struct mtk_wed_wo; +struct mtk_wed_soc_data { + struct { + u32 tx_bm_tkid; + u32 wpdma_rx_ring0; + u32 reset_idx_tx_mask; + u32 reset_idx_rx_mask; + } regmap; + u32 tx_ring_desc_size; + u32 wdma_desc_size; +}; + +struct mtk_wed_amsdu { + void *txd; + dma_addr_t txd_phy; +}; + struct mtk_wed_hw { + const struct mtk_wed_soc_data *soc; struct device_node *node; struct mtk_eth *eth; struct regmap *regs; @@ -24,6 +43,8 @@ struct mtk_wed_hw { struct dentry *debugfs_dir; struct mtk_wed_device *wed_dev; struct mtk_wed_wo *wed_wo; + struct mtk_wed_amsdu *wed_amsdu; + u32 pcie_base; u32 debugfs_reg; u32 num_flows; u8 version; @@ -37,9 +58,30 @@ struct mtk_wdma_info { u8 queue; u16 wcid; u8 bss; + u8 amsdu; }; #ifdef CONFIG_NET_MEDIATEK_SOC_WED +static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw) +{ + return hw->version == 1; +} + +static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw) +{ + return hw->version == 2; +} + +static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw) +{ + return hw->version == 3; +} + +static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw) +{ + return hw->version > 2; +} + static inline void wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) { @@ -122,6 +164,21 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) writel(val, dev->txfree_ring.wpdma + reg); } +static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev) +{ + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return MTK_WED_PCIE_BASE; + + switch (dev->hw->index) { + case 1: + return MTK_WED_PCIE_BASE1; + case 2: + return MTK_WED_PCIE_BASE2; + default: + return MTK_WED_PCIE_BASE0; + } +} + void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, void __iomem *wdma, phys_addr_t wdma_phy, int index); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c index e24afeaea0..781c691473 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -11,6 +11,7 @@ struct reg_dump { u16 offset; u8 type; u8 base; + u32 mask; }; enum { @@ -25,6 +26,8 @@ enum { #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } +#define DUMP_REG_MASK(_reg, _mask) \ + { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask } #define DUMP_RING(_prefix, _base, ...) 
\ { _prefix " BASE", _base, __VA_ARGS__ }, \ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ @@ -32,6 +35,7 @@ enum { { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) +#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask) #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) @@ -151,7 +155,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo); static int wed_rxinfo_show(struct seq_file *s, void *data) { - static const struct reg_dump regs[] = { + static const struct reg_dump regs_common[] = { DUMP_STR("WPDMA RX"), DUMP_WPDMA_RX_RING(0), DUMP_WPDMA_RX_RING(1), @@ -169,7 +173,7 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED_RING(WED_RING_RX_DATA(0)), DUMP_WED_RING(WED_RING_RX_DATA(1)), - DUMP_STR("WED RRO"), + DUMP_STR("WED WO RRO"), DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), DUMP_WED(WED_RROQM_MID_MIB), DUMP_WED(WED_RROQM_MOD_MIB), @@ -180,17 +184,6 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED(WED_RROQM_FDBK_ANC_MIB), DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), - DUMP_STR("WED Route QM"), - DUMP_WED(WED_RTQM_R2H_MIB(0)), - DUMP_WED(WED_RTQM_R2Q_MIB(0)), - DUMP_WED(WED_RTQM_Q2H_MIB(0)), - DUMP_WED(WED_RTQM_R2H_MIB(1)), - DUMP_WED(WED_RTQM_R2Q_MIB(1)), - DUMP_WED(WED_RTQM_Q2H_MIB(1)), - DUMP_WED(WED_RTQM_Q2N_MIB), - DUMP_WED(WED_RTQM_Q2B_MIB), - DUMP_WED(WED_RTQM_PFDBK_MIB), - DUMP_STR("WED WDMA TX"), DUMP_WED(WED_WDMA_TX_MIB), DUMP_WED_RING(WED_WDMA_RING_TX), @@ -211,6 +204,287 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED(WED_RX_BM_INTF), DUMP_WED(WED_RX_BM_ERR_STS), }; + static const struct reg_dump regs_wed_v2[] = { + DUMP_STR("WED Route QM"), + DUMP_WED(WED_RTQM_R2H_MIB(0)), + DUMP_WED(WED_RTQM_R2Q_MIB(0)), + DUMP_WED(WED_RTQM_Q2H_MIB(0)), + DUMP_WED(WED_RTQM_R2H_MIB(1)), + DUMP_WED(WED_RTQM_R2Q_MIB(1)), + DUMP_WED(WED_RTQM_Q2H_MIB(1)), + DUMP_WED(WED_RTQM_Q2N_MIB), + DUMP_WED(WED_RTQM_Q2B_MIB), + DUMP_WED(WED_RTQM_PFDBK_MIB), + }; + static const struct reg_dump regs_wed_v3[] = { + DUMP_STR("WED RX RRO DATA"), + DUMP_WED_RING(WED_RRO_RX_D_RX(0)), + DUMP_WED_RING(WED_RRO_RX_D_RX(1)), + + DUMP_STR("WED RX MSDU PAGE"), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)), + + DUMP_STR("WED RX IND CMD"), + DUMP_WED(WED_IND_CMD_RX_CTRL1), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX), + DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT), + DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, + WED_IND_CMD_PREFETCH_FREE_CNT), + DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID), + + DUMP_STR("WED ADDR ELEM"), + DUMP_WED(WED_ADDR_ELEM_CFG0), + DUMP_WED_MASK(WED_ADDR_ELEM_CFG1, + WED_ADDR_ELEM_PREFETCH_FREE_CNT), + + DUMP_STR("WED Route QM"), + DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT), + DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT), + DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT), + DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT), + DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT), + DUMP_WED(WED_RTQM_ENQ_ERR_CNT), + + DUMP_WED(WED_RTQM_DEQ_DMAD_CNT), + DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT), + DUMP_WED(WED_RTQM_DEQ_PKT_CNT), + DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT), + DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT), + DUMP_WED(WED_RTQM_DEQ_ERR_CNT), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) 
{ + dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common)); + if (mtk_wed_is_v2(hw)) + dump_wed_regs(s, dev, + regs_wed_v2, ARRAY_SIZE(regs_wed_v2)); + else + dump_wed_regs(s, dev, + regs_wed_v3, ARRAY_SIZE(regs_wed_v3)); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); + +static int +wed_amsdu_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED AMDSU INFO"), + DUMP_WED(WED_MON_AMSDU_FIFO_DMAD), + + DUMP_STR("WED AMDSU ENG0 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG1 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG2 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG3 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG4 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG5 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)), + 
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG6 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG7 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG8 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED QMEM INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT), + + DUMP_STR("WED QMEM HEAD INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD), + 
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD), + + DUMP_STR("WED QMEM TAIL INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL), + + DUMP_STR("WED HIFTXD MSDU INFO"), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)), + }; struct mtk_wed_hw *hw = s->private; struct mtk_wed_device *dev = hw->wed_dev; @@ -219,7 +493,94 @@ wed_rxinfo_show(struct seq_file *s, void *data) return 0; } -DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); +DEFINE_SHOW_ATTRIBUTE(wed_amsdu); + +static int +wed_rtqm_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"), + DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS1(Legacy)"), + DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS2(RRO3.0)"), + DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS3(DEBUG)"), + DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; 
+} +DEFINE_SHOW_ATTRIBUTE(wed_rtqm); + +static int +wed_rro_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("RRO/IND CMD CNT"), + DUMP_WED(WED_RX_IND_CMD_CNT(1)), + DUMP_WED(WED_RX_IND_CMD_CNT(2)), + DUMP_WED(WED_RX_IND_CMD_CNT(3)), + DUMP_WED(WED_RX_IND_CMD_CNT(4)), + DUMP_WED(WED_RX_IND_CMD_CNT(5)), + DUMP_WED(WED_RX_IND_CMD_CNT(6)), + DUMP_WED(WED_RX_IND_CMD_CNT(7)), + DUMP_WED(WED_RX_IND_CMD_CNT(8)), + DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9), + WED_IND_CMD_MAGIC_CNT_FAIL_CNT), + + DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)), + DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1), + WED_ADDR_ELEM_SIG_FAIL_CNT), + DUMP_WED(WED_RX_MSDU_PG_CNT(1)), + DUMP_WED(WED_RX_MSDU_PG_CNT(2)), + DUMP_WED(WED_RX_MSDU_PG_CNT(3)), + DUMP_WED(WED_RX_MSDU_PG_CNT(4)), + DUMP_WED(WED_RX_MSDU_PG_CNT(5)), + DUMP_WED_MASK(WED_RX_PN_CHK_CNT, + WED_PN_CHK_FAIL_CNT), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_rro); static int mtk_wed_reg_set(void *data, u64 val) @@ -261,7 +622,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); - if (hw->version != 1) + if (!mtk_wed_is_v1(hw)) { debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops); + if (mtk_wed_is_v3_or_greater(hw)) { + debugfs_create_file_unsafe("amsdu", 0400, dir, hw, + &wed_amsdu_fops); + debugfs_create_file_unsafe("rtqm", 0400, dir, hw, + &wed_rtqm_fops); + debugfs_create_file_unsafe("rro", 0400, dir, hw, + &wed_rro_fops); + } + } } diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c index 071ed3dea8..ea0884186d 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c @@ -16,14 +16,30 @@ #include "mtk_wed_wo.h" #include "mtk_wed.h" -static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg) +static struct mtk_wed_wo_memory_region mem_region[] = { + [MTK_WED_WO_REGION_EMI] = { + .name = "wo-emi", + }, + [MTK_WED_WO_REGION_ILM] = { + .name = "wo-ilm", + }, + [MTK_WED_WO_REGION_DATA] = { + .name = "wo-data", + .shared = true, + }, + [MTK_WED_WO_REGION_BOOT] = { + .name = "wo-boot", + }, +}; + +static u32 wo_r32(u32 reg) { - return readl(wo->boot.addr + reg); + return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg); } -static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) +static void wo_w32(u32 reg, u32 val) { - writel(val, wo->boot.addr + reg); + writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg); } static struct sk_buff * @@ -68,6 +84,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) struct mtk_wed_wo_rx_stats *stats; int i; + if (!wed->wlan.update_wo_rx_stats) + return; + if (count * sizeof(*stats) > skb->len - sizeof(u32)) return; @@ -204,7 +223,7 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, { struct mtk_wed_wo *wo = dev->hw->wed_wo; - if (dev->hw->version == 1) + if (!mtk_wed_get_rx_capa(dev)) return 0; if (WARN_ON(!wo)) @@ -215,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, } static int -mtk_wed_get_memory_region(struct mtk_wed_wo *wo, +mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index, struct mtk_wed_wo_memory_region *region) { struct reserved_mem *rmem; 
struct device_node *np; - int index; - index = of_property_match_string(wo->hw->node, "memory-region-names", - region->name); - if (index < 0) - return index; - - np = of_parse_phandle(wo->hw->node, "memory-region", index); + np = of_parse_phandle(hw->node, "memory-region", index); if (!np) return -ENODEV; @@ -239,14 +252,13 @@ mtk_wed_get_memory_region(struct mtk_wed_wo *wo, region->phy_addr = rmem->base; region->size = rmem->size; - region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size); + region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size); return !region->addr ? -EINVAL : 0; } static int -mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, - struct mtk_wed_wo_memory_region *region) +mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw) { const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data; const struct mtk_wed_fw_trailer *trailer; @@ -259,50 +271,46 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, while (region_ptr < trailer_ptr) { u32 length; + int i; fw_region = (const struct mtk_wed_fw_region *)region_ptr; length = le32_to_cpu(fw_region->len); - - if (region->phy_addr != le32_to_cpu(fw_region->addr)) + if (first_region_ptr < ptr + length) goto next; - if (region->size < length) - goto next; + for (i = 0; i < ARRAY_SIZE(mem_region); i++) { + struct mtk_wed_wo_memory_region *region; - if (first_region_ptr < ptr + length) - goto next; + region = &mem_region[i]; + if (region->phy_addr != le32_to_cpu(fw_region->addr)) + continue; + + if (region->size < length) + continue; - if (region->shared && region->consumed) - return 0; + if (region->shared && region->consumed) + break; - if (!region->shared || !region->consumed) { - memcpy_toio(region->addr, ptr, length); - region->consumed = true; - return 0; + if (!region->shared || !region->consumed) { + memcpy_toio(region->addr, ptr, length); + region->consumed = true; + break; + } } + + if (i == ARRAY_SIZE(mem_region)) + return -EINVAL; next: region_ptr += sizeof(*fw_region); ptr += length; } - return -EINVAL; + return 0; } static int mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) { - static struct mtk_wed_wo_memory_region mem_region[] = { - [MTK_WED_WO_REGION_EMI] = { - .name = "wo-emi", - }, - [MTK_WED_WO_REGION_ILM] = { - .name = "wo-ilm", - }, - [MTK_WED_WO_REGION_DATA] = { - .name = "wo-data", - .shared = true, - }, - }; const struct mtk_wed_fw_trailer *trailer; const struct firmware *fw; const char *fw_name; @@ -311,25 +319,38 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) /* load firmware region metadata */ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { - ret = mtk_wed_get_memory_region(wo, &mem_region[i]); + int index = of_property_match_string(wo->hw->node, + "memory-region-names", + mem_region[i].name); + if (index < 0) + continue; + + ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]); if (ret) return ret; } - wo->boot.name = "wo-boot"; - ret = mtk_wed_get_memory_region(wo, &wo->boot); - if (ret) - return ret; - /* set dummy cr */ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL, wo->hw->index + 1); /* load firmware */ - if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed")) - fw_name = MT7981_FIRMWARE_WO; - else - fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0; + switch (wo->hw->version) { + case 2: + if (of_device_is_compatible(wo->hw->node, + "mediatek,mt7981-wed")) + fw_name = MT7981_FIRMWARE_WO; + else + fw_name = wo->hw->index ? 
MT7986_FIRMWARE_WO1 + : MT7986_FIRMWARE_WO0; + break; + case 3: + fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1 + : MT7988_FIRMWARE_WO0; + break; + default: + return -EINVAL; + } ret = request_firmware(&fw, fw_name, wo->hw->dev); if (ret) @@ -343,23 +364,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n", trailer->chip_id, trailer->num_region); - for (i = 0; i < ARRAY_SIZE(mem_region); i++) { - ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]); - if (ret) - goto out; - } + ret = mtk_wed_mcu_run_firmware(wo, fw); + if (ret) + goto out; /* set the start address */ - boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR - : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; - wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); + if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index) + boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR; + else + boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; + wo_w32(boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); /* wo firmware reset */ - wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); + wo_w32(MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); - val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); - val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK - : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; - wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); + val = wo_r32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) | + MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; + wo_w32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); out: release_firmware(fw); @@ -393,3 +413,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *wo) MODULE_FIRMWARE(MT7981_FIRMWARE_WO); MODULE_FIRMWARE(MT7986_FIRMWARE_WO0); MODULE_FIRMWARE(MT7986_FIRMWARE_WO1); +MODULE_FIRMWARE(MT7988_FIRMWARE_WO0); +MODULE_FIRMWARE(MT7988_FIRMWARE_WO1); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h index f87ab9b8a5..c711909248 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -13,6 +13,9 @@ #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) +#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29) +#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31) + struct mtk_wdma_desc { __le32 buf0; __le32 ctrl; @@ -25,6 +28,8 @@ struct mtk_wdma_desc { #define MTK_WED_RESET 0x008 #define MTK_WED_RESET_TX_BM BIT(0) #define MTK_WED_RESET_RX_BM BIT(1) +#define MTK_WED_RESET_RX_PG_BM BIT(2) +#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3) #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) @@ -37,6 +42,7 @@ struct mtk_wdma_desc { #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) #define MTK_WED_RESET_RX_RRO_QM BIT(20) #define MTK_WED_RESET_RX_ROUTE_QM BIT(21) +#define MTK_WED_RESET_TX_AMSDU BIT(22) #define MTK_WED_RESET_WED BIT(31) #define MTK_WED_CTRL 0x00c @@ -44,6 +50,9 @@ struct mtk_wdma_desc { #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) +#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5) +#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6) +#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7) #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) @@ -54,9 +63,14 @@ struct mtk_wdma_desc { #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) #define 
MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) +#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20) +#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21) +#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22) +#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23) #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) +#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28) #define MTK_WED_EXT_INT_STATUS 0x020 #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) @@ -89,19 +103,26 @@ struct mtk_wdma_desc { #define MTK_WED_EXT_INT_MASK 0x028 #define MTK_WED_EXT_INT_MASK1 0x02c #define MTK_WED_EXT_INT_MASK2 0x030 +#define MTK_WED_EXT_INT_MASK3 0x034 #define MTK_WED_STATUS 0x060 #define MTK_WED_STATUS_TX GENMASK(15, 8) +#define MTK_WED_WPDMA_STATUS 0x068 +#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8) + #define MTK_WED_TX_BM_CTRL 0x080 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26) +#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27) #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) #define MTK_WED_TX_BM_BASE 0x084 +#define MTK_WED_TX_BM_INIT_PTR 0x088 +#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0) +#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16) -#define MTK_WED_TX_BM_TKID 0x088 -#define MTK_WED_TX_BM_TKID_V2 0x0c8 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) @@ -124,6 +145,12 @@ struct mtk_wdma_desc { #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16) #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28) +#define MTK_WED_TX_TKID_INTF 0x0dc +#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16) + +#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0) +#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16) + #define MTK_WED_TX_TKID_DYN_THR 0x0e0 #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0) #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16) @@ -160,9 +187,6 @@ struct mtk_wdma_desc { #define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) #define MTK_WED_RESET_IDX 0x20c -#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) -#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) -#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6) #define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30) #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) @@ -174,6 +198,7 @@ struct mtk_wdma_desc { #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) #define MTK_WED_SCR0 0x3c0 +#define MTK_WED_RX1_CTRL2 0x418 #define MTK_WED_WPDMA_INT_TRIGGER 0x504 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) @@ -204,12 +229,15 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7) -#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19) -#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20) #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21) #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25) #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28) +#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30) #define MTK_WED_WPDMA_RESET_IDX 0x50c 
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) @@ -255,9 +283,10 @@ struct mtk_wdma_desc { #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) #define MTK_WED_PCIE_INT_CTRL 0x57c -#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) -#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12) +#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) +#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) +#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21) #define MTK_WED_WPDMA_CFG_BASE 0x580 #define MTK_WED_WPDMA_CFG_INT_MASK 0x584 @@ -283,15 +312,30 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20) #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c -#define MTK_WED_WPDMA_RX_RING 0x770 #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c +#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4 +#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0) +#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1) +#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8) +#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16) + +#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8 +#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15) + +#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc + +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0 +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0) +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16) + #define MTK_WED_WDMA_RING_TX 0x800 #define MTK_WED_WDMA_TX_MIB 0x810 @@ -299,6 +343,20 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) +#define MTK_WED_WDMA_RX_PREF_CFG 0x950 +#define MTK_WED_WDMA_RX_PREF_EN BIT(0) +#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1) +#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8) +#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16) +#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24) +#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25) +#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26) +#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27) + +#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C +#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0) +#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16) + #define MTK_WED_WDMA_GLO_CFG 0xa04 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) @@ -322,6 +380,7 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_RESET_IDX 0xa08 #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20) #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) #define MTK_WED_WDMA_INT_CLR 0xa24 @@ -331,6 +390,7 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) #define MTK_WED_WDMA_INT_CTRL 0xa2c +#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0) #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) #define MTK_WED_WDMA_CFG_BASE 0xaa0 @@ -391,9 +451,62 @@ struct mtk_wdma_desc { #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) +#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238 +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12) + +#define 
MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21) + #define MTK_WDMA_INT_GRP1 0x250 #define MTK_WDMA_INT_GRP2 0x254 +#define MTK_WDMA_PREF_TX_CFG 0x2d0 +#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0) +#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1) + +#define MTK_WDMA_PREF_RX_CFG 0x2dc +#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0) +#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1) + +#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0 +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0) +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16) + +#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4 +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0) +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16) + +#define MTK_WDMA_PREF_SIDX_CFG 0x2e4 +#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) +#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) + +#define MTK_WDMA_WRBK_TX_CFG 0x300 +#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0) +#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30) + +#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4) +#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0) + +#define MTK_WDMA_WRBK_RX_CFG 0x344 +#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0) +#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30) + +#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4) +#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0) + +#define MTK_WDMA_WRBK_SIDX_CFG 0x388 +#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) +#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) + #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 
0x4 : 0x0) #define MTK_PCIE_MIRROR_MAP_EN BIT(0) #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) @@ -407,6 +520,32 @@ struct mtk_wdma_desc { #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) +#define MTK_WED_RTQM_RST 0xb04 + +#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c +#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28 +#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34 + +#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44 +#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50 +#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c + +#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c +#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78 +#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84 + +#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94 +#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0 +#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac + #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) #define MTK_WED_RTQM_Q2N_MIB 0xb80 @@ -415,6 +554,24 @@ struct mtk_wdma_desc { #define MTK_WED_RTQM_Q2B_MIB 0xb8c #define MTK_WED_RTQM_PFDBK_MIB 0xb90 +#define MTK_WED_RTQM_ENQ_CFG0 0xbb8 +#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12) + +#define MTK_WED_RTQM_FDROP_MIB 0xb84 +#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc +#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0 +#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4 +#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8 +#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc +#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0 + +#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8 +#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc +#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0 +#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4 +#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8 +#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec + #define MTK_WED_RROQM_GLO_CFG 0xc04 #define MTK_WED_RROQM_RST_IDX 0xc08 #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) @@ -464,7 +621,195 @@ struct mtk_wdma_desc { #define MTK_WED_RX_BM_INTF 0xd9c #define MTK_WED_RX_BM_ERR_STS 0xda8 +#define MTK_RRO_IND_CMD_SIGNATURE 0xe00 +#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0) +#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28) + +#define MTK_WED_IND_CMD_RX_CTRL0 0xe04 +#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0) +#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16) +#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28) + +#define MTK_WED_IND_CMD_RX_CTRL1 0xe08 +#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c +#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0) +#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16) + +#define MTK_WED_RRO_CFG0 0xe10 +#define MTK_WED_RRO_CFG1 0xe14 +#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29) +#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16) +#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0) + +#define MTK_WED_ADDR_ELEM_CFG0 0xe18 +#define MTK_WED_ADDR_ELEM_CFG1 0xe1c +#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16) + +#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20 +#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0) +#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28) +#define 
MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29) +#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30) +#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31) + +#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24 +#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28 + +#define MTK_WED_PN_CHECK_CFG 0xe30 +#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0) +#define MTK_WED_PN_CHECK_RD_RDY BIT(28) +#define MTK_WED_PN_CHECK_WR_RDY BIT(29) +#define MTK_WED_PN_CHECK_RD BIT(30) +#define MTK_WED_PN_CHECK_WR BIT(31) + +#define MTK_WED_PN_CHECK_WDATA_M 0xe38 +#define MTK_WED_PN_CHECK_IS_FIRST BIT(17) + +#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8) + +#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58 +#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26) +#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31) + +#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc) +#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc) +#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc) + +#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10) + +#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13) + +#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4) +#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26) +#define MTK_WED_RRO_RX_D_DRV_EN BIT(31) + +#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0 +#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0) + +#define MTK_WED_RRO_PG_BM_BASE 0xeb4 +#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8 +#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0) +#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16) + +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10) + +#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4 +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18) + +#define MTK_WED_RRO_RX_HW_STS 0xf00 +#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0) + +#define MTK_WED_RX_IND_CMD_CNT0 0xf20 +#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31) + +#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4) +#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0) + +#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4) +#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0) +#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16) +#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0) + +#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4) + +#define MTK_WED_RX_PN_CHK_CNT 0xf70 +#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0) + #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 #define MTK_WED_PCIE_INT_MASK 0x0 +#define MTK_WED_AMSDU_FIFO 0x1800 +#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10) + +#define MTK_WED_AMSDU_STA_INFO 0x01810 +#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0) +#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1) + +#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814 +#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0) +#define MTK_WED_AMSDU_STA_RMVL BIT(1) +#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2) +#define 
MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8) + +#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4) + +#define MTK_WED_AMSDU_PSE 0x1910 +#define MTK_WED_AMSDU_PSE_RESET BIT(16) + +#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968 +#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15) + +#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34 + +#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50) + +#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50) +#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0) +#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16) + +#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50) +#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0) +#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16) +#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24) + +#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04 + +#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4) +#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0) + +#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4) +#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0) + +#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4) + +#define MTK_WED_PCIE_BASE 0x11280000 +#define MTK_WED_PCIE_BASE0 0x11300000 +#define MTK_WED_PCIE_BASE1 0x11310000 +#define MTK_WED_PCIE_BASE2 0x11290000 #endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h index 7a1a2a28f1..87a67fa386 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h @@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx { #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin" #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin" #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin" +#define MT7988_FIRMWARE_WO0 
"mediatek/mt7988_wo_0.bin" +#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin" #define MTK_WO_MCU_CFG_LS_BASE 0 #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000) @@ -228,7 +230,6 @@ struct mtk_wed_wo_queue { struct mtk_wed_wo { struct mtk_wed_hw *hw; - struct mtk_wed_wo_memory_region boot; struct mtk_wed_wo_queue q_tx; struct mtk_wed_wo_queue q_rx; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 332472fe49..a09b6e0533 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -400,7 +400,7 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) for (ring = 0; ring < priv->rx_ring_num; ring++) { if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { local_bh_disable(); - napi_reschedule(&priv->rx_cq[ring]->napi); + napi_schedule(&priv->rx_cq[ring]->napi); local_bh_enable(); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index fe48d20d61..0005d9e2c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1967,7 +1967,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4); - strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1); + strscpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ); mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index c4f4de82e2..685335832a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -189,3 +189,11 @@ config MLX5_SF_MANAGER port is managed through devlink. A subfunction supports RDMA, netdevice and vdpa device. It is similar to a SRIOV VF but it doesn't require SRIOV support. + +config MLX5_DPLL + tristate "Mellanox 5th generation network adapters (ConnectX series) DPLL support" + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + select DPLL + help + DPLL support in Mellanox Technologies ConnectX NICs. 
+ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 7e94caca48..c44870b175 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -128,3 +128,6 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_ # SF manager # mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o + +obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o +mlx5_dpll-y := dpll.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 55efb932ab..4957412ff1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -528,6 +528,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_SAVE_VHCA_STATE: case MLX5_CMD_OP_LOAD_VHCA_STATE: case MLX5_CMD_OP_SYNC_CRYPTO: + case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -ENOLINK; @@ -731,6 +732,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); + MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS); default: return "unknown command opcode"; } } @@ -2093,6 +2095,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, } EXPORT_SYMBOL(mlx5_cmd_exec_cb); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr) +{ + u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {}; + u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {}; + void *key; + + MLX5_SET(allow_other_vhca_access_in, + in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); + MLX5_SET(allow_other_vhca_access_in, + in, object_type_to_be_accessed, attr->obj_type); + MLX5_SET(allow_other_vhca_access_in, + in, object_id_to_be_accessed, attr->obj_id); + + key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); + memcpy(key, attr->access_key, sizeof(attr->access_key)); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {}; + void *param; + void *attr; + void *key; + int ret; + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, alias_attr->obj_type); + param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param); + MLX5_SET(general_obj_create_param, param, alias_object, 1); + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); + MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); + MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); + + key = MLX5_ADDR_OF(alias_context, attr, access_key); + memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return 0; +} + +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, + u16 obj_type) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + + 
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 7909f378dc..cf0477f53d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -38,8 +38,6 @@ #include "devlink.h" #include "lag/lag.h" -/* intf dev list mutex */ -static DEFINE_MUTEX(mlx5_intf_mutex); static DEFINE_IDA(mlx5_adev_ida); static bool is_eth_rep_supported(struct mlx5_core_dev *dev) @@ -206,6 +204,19 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev) return err ? false : val.vbool; } +static bool is_dpll_supported(struct mlx5_core_dev *dev) +{ + if (!IS_ENABLED(CONFIG_MLX5_DPLL)) + return false; + + if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) { + mlx5_core_warn(dev, "Missing SyncE capability\n"); + return false; + } + + return true; +} + enum { MLX5_INTERFACE_PROTOCOL_ETH, MLX5_INTERFACE_PROTOCOL_ETH_REP, @@ -215,6 +226,8 @@ enum { MLX5_INTERFACE_PROTOCOL_MPIB, MLX5_INTERFACE_PROTOCOL_VNET, + + MLX5_INTERFACE_PROTOCOL_DPLL, }; static const struct mlx5_adev_device { @@ -237,6 +250,8 @@ static const struct mlx5_adev_device { .is_supported = &is_ib_rep_supported }, [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport", .is_supported = &is_mp_supported }, + [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll", + .is_supported = &is_dpll_supported }, }; int mlx5_adev_idx_alloc(void) @@ -320,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev) void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev) { - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev) @@ -338,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) int ret = 0, i; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); priv->flags &= ~MLX5_PRIV_FLAGS_DETACH; for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { if (!priv->adev[i]) { @@ -383,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) break; } } - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); return ret; } @@ -396,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend) int i; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { if (!priv->adev[i]) continue; @@ -426,7 +441,7 @@ skip_suspend: priv->adev[i] = NULL; } priv->flags |= MLX5_PRIV_FLAGS_DETACH; - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } int mlx5_register_device(struct mlx5_core_dev *dev) @@ -434,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev) int ret; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; ret = mlx5_rescan_drivers_locked(dev); - mutex_unlock(&mlx5_intf_mutex); + 
mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); if (ret) mlx5_unregister_device(dev); @@ -447,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev) void mlx5_unregister_device(struct mlx5_core_dev *dev) { devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; mlx5_rescan_drivers_locked(dev); - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } static int add_drivers(struct mlx5_core_dev *dev) @@ -528,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - lockdep_assert_held(&mlx5_intf_mutex); if (priv->flags & MLX5_PRIV_FLAGS_DETACH) return 0; @@ -548,85 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid); } - -static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) -{ - return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | - (dev->pdev->bus->number << 8) | - PCI_SLOT(dev->pdev->devfn)); -} - -static int _next_phys_dev(struct mlx5_core_dev *mdev, - const struct mlx5_core_dev *curr) -{ - if (!mlx5_core_is_pf(mdev)) - return 0; - - if (mdev == curr) - return 0; - - if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) && - mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) - return 0; - - return 1; -} - -static void *pci_get_other_drvdata(struct device *this, struct device *other) -{ - if (this->driver != other->driver) - return NULL; - - return pci_get_drvdata(to_pci_dev(other)); -} - -static int next_phys_dev_lag(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - if (!mlx5_lag_is_supported(mdev)) - return 0; - - return _next_phys_dev(mdev, data); -} - -static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, - int (*match)(struct device *dev, const void *data)) -{ - struct device *next; - - if (!mlx5_core_is_pf(dev)) - return NULL; - - next = bus_find_device(&pci_bus_type, NULL, dev, match); - if (!next) - return NULL; - - put_device(next); - return pci_get_drvdata(to_pci_dev(next)); -} - -/* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev_lag); -} - -void mlx5_dev_list_lock(void) -{ - mutex_lock(&mlx5_intf_mutex); -} -void mlx5_dev_list_unlock(void) -{ - mutex_unlock(&mlx5_intf_mutex); -} - -int mlx5_dev_list_trylock(void) -{ - return mutex_trylock(&mlx5_intf_mutex); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index af8460bb25..3e064234f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -138,7 +138,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, { struct mlx5_core_dev *dev = devlink_priv(devlink); struct pci_dev *pdev = dev->pdev; - bool sf_dev_allocated; int ret = 0; if (mlx5_dev_is_lightweight(dev)) { @@ -148,16 +147,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, return 0; } - sf_dev_allocated = mlx5_sf_dev_allocated(dev); - if (sf_dev_allocated) { - /* Reload results in deleting SF device which further results in - * 
unregistering devlink instance while holding devlink_mutext. - * Hence, do not support reload. - */ - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated"); - return -EOPNOTSUPP; - } - if (mlx5_lag_is_active(dev)) { NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 85d3bfa078..080e7eab52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -889,36 +889,16 @@ int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev) return 0; } -static int +static void mlx5_devlink_fmsg_fill_trace(struct devlink_fmsg *fmsg, struct mlx5_fw_trace_data *trace_data) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp); - if (err) - return err; - - err = devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp); + devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost); + devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id); + devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg); + devlink_fmsg_obj_nest_end(fmsg); } int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, @@ -927,7 +907,6 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, struct mlx5_fw_trace_data *straces = tracer->st_arr.straces; u32 index, start_index, end_index; u32 saved_traces_index; - int err; if (!straces[0].timestamp) return -ENOMSG; @@ -940,22 +919,18 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, start_index = 0; end_index = (saved_traces_index - 1) & (SAVED_TRACES_NUM - 1); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces"); - if (err) - goto unlock; + devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces"); index = start_index; while (index != end_index) { - err = mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]); - if (err) - goto unlock; + mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]); index = (index + 1) & (SAVED_TRACES_NUM - 1); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); -unlock: + devlink_fmsg_arr_pair_nest_end(fmsg); mutex_unlock(&tracer->st_arr.lock); - return err; + + return 0; } static void mlx5_fw_tracer_update_db(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c index e869c65d8e..c7216e84ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c @@ -13,106 +13,55 @@ struct mlx5_vnic_diag_stats { __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; }; -int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, - struct devlink_fmsg *fmsg, - u16 vport_num, bool other_vport) +void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, + struct devlink_fmsg *fmsg, + u16 vport_num, bool 
other_vport) { u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_vnic_diag_stats vnic; - int err; MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); MLX5_SET(query_vnic_env_in, in, vport_number, vport_num); MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport); - err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out); - if (err) - return err; + mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out); - err = devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters"); - if (err) - return err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters"); + devlink_fmsg_obj_nest_start(fmsg); if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) { - err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues", - VNIC_ENV_GET(&vnic, total_error_queues)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow", - VNIC_ENV_GET(&vnic, - send_queue_priority_update_flow)); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "total_error_queues", + VNIC_ENV_GET(&vnic, total_error_queues)); + devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow", + VNIC_ENV_GET(&vnic, send_queue_priority_update_flow)); } - if (MLX5_CAP_GEN(dev, eq_overrun_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun", - VNIC_ENV_GET(&vnic, comp_eq_overrun)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun", - VNIC_ENV_GET(&vnic, async_eq_overrun)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) { - err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun", - VNIC_ENV_GET(&vnic, cq_overrun)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, invalid_command_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command", - VNIC_ENV_GET(&vnic, invalid_command)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, quota_exceeded_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command", - VNIC_ENV_GET(&vnic, quota_exceeded_command)); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun", + VNIC_ENV_GET(&vnic, comp_eq_overrun)); + devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun", + VNIC_ENV_GET(&vnic, async_eq_overrun)); } - - if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) { - err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", - VNIC_ENV_GET64(&vnic, - nic_receive_steering_discard)); - if (err) - return err; - } - + if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) + devlink_fmsg_u32_pair_put(fmsg, "cq_overrun", + VNIC_ENV_GET(&vnic, cq_overrun)); + if (MLX5_CAP_GEN(dev, invalid_command_count)) + devlink_fmsg_u32_pair_put(fmsg, "invalid_command", + VNIC_ENV_GET(&vnic, invalid_command)); + if (MLX5_CAP_GEN(dev, quota_exceeded_count)) + devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command", + VNIC_ENV_GET(&vnic, quota_exceeded_command)); + if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) + devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", + VNIC_ENV_GET64(&vnic, nic_receive_steering_discard)); if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) { - err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, - generated_pkt_steering_fail)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); - if (err) - return err; + devlink_fmsg_u64_pair_put(fmsg, 
"generated_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail)); + devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); } - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter, @@ -121,7 +70,8 @@ static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter, { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); - return mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false); + mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false); + return 0; } static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h index eba87a39e9..fbc31256f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h @@ -9,8 +9,8 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev); void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev); -int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, - struct devlink_fmsg *fmsg, - u16 vport_num, bool other_vport); +void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, + struct devlink_fmsg *fmsg, + u16 vport_num, bool other_vport); #endif /* __MLX5_REPORTER_VNIC_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c new file mode 100644 index 0000000000..8ce5c8bcda --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include +#include + +/* This structure represents a reference to DPLL, one is created + * per mdev instance. 
+ */ +struct mlx5_dpll { + struct dpll_device *dpll; + struct dpll_pin *dpll_pin; + struct mlx5_core_dev *mdev; + struct workqueue_struct *wq; + struct delayed_work work; + struct { + bool valid; + enum dpll_lock_status lock_status; + enum dpll_pin_state pin_state; + } last; + struct notifier_block mdev_nb; + struct net_device *tracking_netdev; +}; + +static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id) +{ + u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSECQ, 0, 0); + if (err) + return err; + *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity); + return 0; +} + +static int +mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev, + enum mlx5_msees_admin_status *admin_status, + enum mlx5_msees_oper_status *oper_status, + bool *ho_acq) +{ + u32 out[MLX5_ST_SZ_DW(msees_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msees_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSEES, 0, 0); + if (err) + return err; + if (admin_status) + *admin_status = MLX5_GET(msees_reg, out, admin_status); + *oper_status = MLX5_GET(msees_reg, out, oper_status); + if (ho_acq) + *ho_acq = MLX5_GET(msees_reg, out, ho_acq); + return 0; +} + +static int +mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev, + enum mlx5_msees_admin_status admin_status) +{ + u32 out[MLX5_ST_SZ_DW(msees_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msees_reg)] = {}; + + MLX5_SET(msees_reg, in, field_select, + MLX5_MSEES_FIELD_SELECT_ENABLE | + MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS); + MLX5_SET(msees_reg, in, admin_status, admin_status); + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSEES, 0, 1); +} + +static enum dpll_lock_status +mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq) +{ + switch (oper_status) { + case MLX5_MSEES_OPER_STATUS_SELF_TRACK: + fallthrough; + case MLX5_MSEES_OPER_STATUS_OTHER_TRACK: + return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ : + DPLL_LOCK_STATUS_LOCKED; + case MLX5_MSEES_OPER_STATUS_HOLDOVER: + fallthrough; + case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER: + return DPLL_LOCK_STATUS_HOLDOVER; + default: + return DPLL_LOCK_STATUS_UNLOCKED; + } +} + +static enum dpll_pin_state +mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status, + enum mlx5_msees_oper_status oper_status) +{ + return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK && + (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK || + oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ? 
+ DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED; +} + +static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, + void *priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack) +{ + enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll *mdpll = priv; + bool ho_acq; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL, + &oper_status, &ho_acq); + if (err) + return err; + + *status = mlx5_dpll_lock_status_get(oper_status, ho_acq); + return 0; +} + +static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll, + void *priv, enum dpll_mode *mode, + struct netlink_ext_ack *extack) +{ + *mode = DPLL_MODE_MANUAL; + return 0; +} + +static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll, + void *priv, + enum dpll_mode mode, + struct netlink_ext_ack *extack) +{ + return mode == DPLL_MODE_MANUAL; +} + +static const struct dpll_device_ops mlx5_dpll_device_ops = { + .lock_status_get = mlx5_dpll_device_lock_status_get, + .mode_get = mlx5_dpll_device_mode_get, + .mode_supported = mlx5_dpll_device_mode_supported, +}; + +static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_INPUT; + return 0; +} + +static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + enum mlx5_msees_admin_status admin_status; + enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll *mdpll = pin_priv; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, + &oper_status, NULL); + if (err) + return err; + *state = mlx5_dpll_pin_state_get(admin_status, oper_status); + return 0; +} + +static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct mlx5_dpll *mdpll = pin_priv; + + return mlx5_dpll_synce_status_set(mdpll->mdev, + state == DPLL_PIN_STATE_CONNECTED ? 
+ MLX5_MSEES_ADMIN_STATUS_TRACK : + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); +} + +static const struct dpll_pin_ops mlx5_dpll_pins_ops = { + .direction_get = mlx5_dpll_pin_direction_get, + .state_on_dpll_get = mlx5_dpll_state_on_dpll_get, + .state_on_dpll_set = mlx5_dpll_state_on_dpll_set, +}; + +static const struct dpll_pin_properties mlx5_dpll_pin_properties = { + .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT, + .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE, +}; + +#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */ + +static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll) +{ + queue_delayed_work(mdpll->wq, &mdpll->work, + msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL)); +} + +static void mlx5_dpll_periodic_work(struct work_struct *work) +{ + struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll, + work.work); + enum mlx5_msees_admin_status admin_status; + enum mlx5_msees_oper_status oper_status; + enum dpll_lock_status lock_status; + enum dpll_pin_state pin_state; + bool ho_acq; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, + &oper_status, &ho_acq); + if (err) + goto err_out; + lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq); + pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status); + + if (!mdpll->last.valid) + goto invalid_out; + + if (mdpll->last.lock_status != lock_status) + dpll_device_change_ntf(mdpll->dpll); + if (mdpll->last.pin_state != pin_state) + dpll_pin_change_ntf(mdpll->dpll_pin); + +invalid_out: + mdpll->last.lock_status = lock_status; + mdpll->last.pin_state = pin_state; + mdpll->last.valid = true; +err_out: + mlx5_dpll_periodic_work_queue(mdpll); +} + +static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll, + struct net_device *netdev) +{ + if (mdpll->tracking_netdev) + return; + netdev_dpll_pin_set(netdev, mdpll->dpll_pin); + mdpll->tracking_netdev = netdev; +} + +static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll) +{ + if (!mdpll->tracking_netdev) + return; + netdev_dpll_pin_clear(mdpll->tracking_netdev); + mdpll->tracking_netdev = NULL; +} + +static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb); + struct net_device *netdev = data; + + switch (event) { + case MLX5_DRIVER_EVENT_UPLINK_NETDEV: + if (netdev) + mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev); + else + mlx5_dpll_netdev_dpll_pin_clear(mdpll); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll, + struct mlx5_core_dev *mdev) +{ + mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event; + mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb); + mlx5_core_uplink_netdev_event_replay(mdev); +} + +static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll, + struct mlx5_core_dev *mdev) +{ + mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb); + mlx5_dpll_netdev_dpll_pin_clear(mdpll); +} + +static int mlx5_dpll_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct mlx5_dpll *mdpll; + u64 clock_id; + int err; + + err = mlx5_dpll_synce_status_set(mdev, + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); + if (err) + return err; + + err = mlx5_dpll_clock_id_get(mdev, &clock_id); + if (err) + return err; + + mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL); + if 
(!mdpll) + return -ENOMEM; + mdpll->mdev = mdev; + auxiliary_set_drvdata(adev, mdpll); + + /* Multiple mdev instances might share one DPLL device. */ + mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE); + if (IS_ERR(mdpll->dpll)) { + err = PTR_ERR(mdpll->dpll); + goto err_free_mdpll; + } + + err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC, + &mlx5_dpll_device_ops, mdpll); + if (err) + goto err_put_dpll_device; + + /* Multiple mdev instances might share one DPLL pin. */ + mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev), + THIS_MODULE, &mlx5_dpll_pin_properties); + if (IS_ERR(mdpll->dpll_pin)) { + err = PTR_ERR(mdpll->dpll_pin); + goto err_unregister_dpll_device; + } + + err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); + if (err) + goto err_put_dpll_pin; + + mdpll->wq = create_singlethread_workqueue("mlx5_dpll"); + if (!mdpll->wq) { + err = -ENOMEM; + goto err_unregister_dpll_pin; + } + + mlx5_dpll_mdev_netdev_track(mdpll, mdev); + + INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work); + mlx5_dpll_periodic_work_queue(mdpll); + + return 0; + +err_unregister_dpll_pin: + dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); +err_put_dpll_pin: + dpll_pin_put(mdpll->dpll_pin); +err_unregister_dpll_device: + dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll); +err_put_dpll_device: + dpll_device_put(mdpll->dpll); +err_free_mdpll: + kfree(mdpll); + return err; +} + +static void mlx5_dpll_remove(struct auxiliary_device *adev) +{ + struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); + struct mlx5_core_dev *mdev = mdpll->mdev; + + cancel_delayed_work_sync(&mdpll->work); + mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); + destroy_workqueue(mdpll->wq); + dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); + dpll_pin_put(mdpll->dpll_pin); + dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll); + dpll_device_put(mdpll->dpll); + kfree(mdpll); + + mlx5_dpll_synce_status_set(mdev, + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); +} + +static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state) +{ + return 0; +} + +static int mlx5_dpll_resume(struct auxiliary_device *adev) +{ + return 0; +} + +static const struct auxiliary_device_id mlx5_dpll_id_table[] = { + { .name = MLX5_ADEV_NAME ".dpll", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table); + +static struct auxiliary_driver mlx5_dpll_driver = { + .name = "dpll", + .probe = mlx5_dpll_probe, + .remove = mlx5_dpll_remove, + .suspend = mlx5_dpll_suspend, + .resume = mlx5_dpll_resume, + .id_table = mlx5_dpll_id_table, +}; + +static int __init mlx5_dpll_init(void) +{ + return auxiliary_driver_register(&mlx5_dpll_driver); +} + +static void __exit mlx5_dpll_exit(void) +{ + auxiliary_driver_unregister(&mlx5_dpll_driver); +} + +module_init(mlx5_dpll_init); +module_exit(mlx5_dpll_exit); + +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 20a6bc1a23..729a11b5fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -141,7 +141,7 @@ struct page_pool; #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 #define MLX5E_MIN_NUM_CHANNELS 0x1 -#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2) +#define 
MLX5E_MAX_NUM_CHANNELS 256 #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ @@ -168,6 +168,13 @@ struct page_pool; #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) +enum mlx5e_devcom_events { + MPV_DEVCOM_MASTER_UP, + MPV_DEVCOM_MASTER_DOWN, + MPV_DEVCOM_IPSEC_MASTER_UP, + MPV_DEVCOM_IPSEC_MASTER_DOWN, +}; + static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) { if (mlx5_lag_is_lacp_owner(mdev)) @@ -193,7 +200,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? MLX5E_MIN_NUM_CHANNELS : - min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS); + min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS, + (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size))); } /* The maximum WQE size can be retrieved by max_wqe_sz_sq in @@ -937,6 +945,7 @@ struct mlx5e_priv { struct mlx5e_htb *htb; struct mlx5e_mqprio_rl *mqprio_rl; struct dentry *dfs_root; + struct mlx5_devcom_comp_dev *devcom; }; struct mlx5e_dev { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index c6b6e290fd..0b1ac6e5c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -12,11 +12,19 @@ struct mlx5e_dev *mlx5e_create_devlink(struct device *dev, { struct mlx5e_dev *mlx5e_dev; struct devlink *devlink; + int err; devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev), devlink_net(priv_to_devlink(mdev)), dev); if (!devlink) return ERR_PTR(-ENOMEM); + + err = devl_nested_devlink_set(priv_to_devlink(mdev), devlink); + if (err) { + devlink_free(devlink); + return ERR_PTR(err); + } + devlink_register(devlink); return devlink_priv(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index e5a44b0b96..4d6225e0ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -150,7 +150,6 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, struct dentry *dfs_root); void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs); struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs); -void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc); struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs); struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs); struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 6f4e6c34b2..81523825fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -5,134 +5,59 @@ #include "lib/eq.h" #include "lib/mlx5.h" -int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) +void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) { - int err; - - err = devlink_fmsg_pair_nest_start(fmsg, name); - if (err) - return err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_pair_nest_start(fmsg, name); + devlink_fmsg_obj_nest_start(fmsg); } -int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) +void 
mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } -int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; u8 hw_status; void *cqc; - int err; - - err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out); - if (err) - return err; + mlx5_core_query_cq(cq->mdev, &cq->mcq, out); cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); hw_status = MLX5_GET(cqc, cqc, status); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); + devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); + devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); + devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { u8 cq_log_stride; u32 cq_sz; - int err; cq_sz = mlx5_cqwq_get_size(&cq->wq); cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); + devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) +void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ"); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ"); + devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); + devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); + 
devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); + devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); + devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } void mlx5e_health_create_reporters(struct mlx5e_priv *priv) @@ -235,23 +160,19 @@ int mlx5e_health_report(struct mlx5e_priv *priv, } #define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024 -static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg, - const void *value, u32 value_len) +static void mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg, + const void *value, u32 value_len) { u32 data_size; - int err = 0; u32 offset; for (offset = 0; offset < value_len; offset += data_size) { data_size = value_len - offset; if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE) data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE; - err = devlink_fmsg_binary_put(fmsg, value + offset, data_size); - if (err) - break; + devlink_fmsg_binary_put(fmsg, value + offset, data_size); } - return err; } int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key, @@ -259,9 +180,8 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_rsc_dump_cmd *cmd; + int cmd_err, err = 0; struct page *page; - int cmd_err, err; - int end_err; int size; if (IS_ERR_OR_NULL(mdev->rsc_dump)) @@ -271,9 +191,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key if (!page) return -ENOMEM; - err = devlink_fmsg_binary_pair_nest_start(fmsg, "data"); - if (err) - goto free_page; + devlink_fmsg_binary_pair_nest_start(fmsg, "data"); cmd = mlx5_rsc_dump_cmd_create(mdev, key); if (IS_ERR(cmd)) { @@ -288,52 +206,31 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key goto destroy_cmd; } - err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size); - if (err) - goto destroy_cmd; - + mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size); } while (cmd_err > 0); destroy_cmd: mlx5_rsc_dump_cmd_destroy(cmd); - end_err = devlink_fmsg_binary_pair_nest_end(fmsg); - if (end_err) - err = end_err; + devlink_fmsg_binary_pair_nest_end(fmsg); free_page: __free_page(page); return err; } -int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, - int queue_idx, char *lbl) +void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, + int queue_idx, char *lbl) { struct mlx5_rsc_key key = {}; - int err; key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = queue_idx; key.size = PAGE_SIZE; key.num_of_obj1 = 1; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx); - if (err) - return err; - - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_start(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl); + devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx); + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 415840c3ef..84be3dd6f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -20,11 +20,11 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq); -int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg); -int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); -int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); +void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg); +void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); +void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); void mlx5e_reporter_rx_create(struct mlx5e_priv *priv); void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); @@ -54,6 +54,6 @@ void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv); void mlx5e_health_channels_update(struct mlx5e_priv *priv); int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key, struct devlink_fmsg *fmsg); -int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, - int queue_idx, char *lbl); +void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, + int queue_idx, char *lbl); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 03b119a434..4358798d6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -199,78 +199,38 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, mlx5e_health_recover_channels(priv); } -static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, - struct devlink_fmsg *fmsg) +static void mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, + struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "WQE size", - mlx5_wq_cyc_get_size(&icosq->wq)); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq)); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); + devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn); + devlink_fmsg_u8_pair_put(fmsg, "HW state", 
hw_state); + devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc); + devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc); + devlink_fmsg_u32_pair_put(fmsg, "WQE size", mlx5_wq_cyc_get_size(&icosq->wq)); + + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn); + devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc); + devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq) +static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq) { - int err; int i; BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES, "rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h"); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) { - err = devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i], - test_bit(i, &rq->state)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) + devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i], + test_bit(i, &rq->state)); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int @@ -291,184 +251,93 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq, wq_head = mlx5e_rqwq_get_head(rq); wqe_counter = mlx5e_rqwq_get_wqe_counter(rq); - err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); - if (err) - return err; - - err = mlx5e_health_rq_put_sw_state(fmsg, rq); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); - if (err) - return err; - - err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); + devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); + devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter); + devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); + devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); + mlx5e_health_rq_put_sw_state(fmsg, rq); + mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); + mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg); if (rq->icosq) { struct mlx5e_icosq *icosq = rq->icosq; u8 icosq_hw_state; + int err; err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state); if (err) return err; - err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); - if (err) - return err; + mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); } return 0; } -static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); - if (err) - return err; - - err = 
mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); - if (err) - return err; - - return devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); + mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + devlink_fmsg_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = rq->priv; struct mlx5e_params *params; u32 rq_stride, rq_sz; bool real_time; - int err; params = &priv->channels.params; rq_sz = mlx5e_rqwq_get_size(rq); real_time = mlx5_is_real_time_rq(priv->mdev); rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); + devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); + devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); + devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); + devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch, struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); - if (err) - return err; - - err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); + mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq; struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); - if (err) - return err; - - err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg); - if (err) - return err; - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg); - if (err) - return err; - } - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); + mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg); + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, 
fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); - if (err) - return err; - - err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + devlink_fmsg_obj_nest_end(fmsg); } static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, @@ -477,20 +346,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int i, err = 0; + int i; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); - if (err) - goto unlock; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - if (err) - goto unlock; + mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -499,19 +363,14 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ? &c->xskrq : &c->rq; - err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); - if (err) - goto unlock; - } - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); - if (err) - goto unlock; + mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); unlock: mutex_unlock(&priv->state_lock); - return err; + return 0; } static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -519,61 +378,34 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ { struct mlx5e_txqsq *icosq = ctx; struct mlx5_rsc_key key = {}; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = icosq->sqn; key.num_of_obj1 = 1; + 
mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); key.rsc = MLX5_SGMT_TYPE_SND_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -581,60 +413,34 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms { struct mlx5_rsc_key key = {}; struct mlx5e_rq *rq = ctx; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = rq->rqn; key.num_of_obj1 = 1; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); key.rsc = MLX5_SGMT_TYPE_RCV_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, @@ -642,44 +448,28 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, { struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; - int i, err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = 
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - for (i = 0; i < priv->channels.num; i++) { + for (int i = 0; i < priv->channels.num; i++) { struct mlx5e_rq *rq = &priv->channels.c[i]->rq; - err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ"); } - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ"); - if (err) - return err; - } + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ"); - return devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index ff8242f67c..6b44ddce14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -50,25 +50,19 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) sq->pc = 0; } -static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq) +static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq) { - int err; int i; BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES, "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h"); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) { - err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i], - test_bit(i, &sq->state)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) + devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i], + test_bit(i, &sq->state)); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) @@ -220,7 +214,7 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, mlx5e_health_recover_channels(priv); } -static int +static void mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq, int tc) { @@ -229,164 +223,71 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, u8 state; int err; - err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state); - if (err) - return err; - - err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "tc", tc); + devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); + devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); - err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); - if 
(err) - return err; - - err = mlx5e_health_sq_put_sw_state(fmsg, sq); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg); - if (err) - return err; - - return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); + err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + if (!err) + devlink_fmsg_u8_pair_put(fmsg, "HW state", state); + + devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); + devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); + devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); + mlx5e_health_sq_put_sw_state(fmsg, sq); + mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg); + mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); } -static int +static void mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq, int tc) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); - if (err) - return err; - - err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); + mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); + devlink_fmsg_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg, struct mlx5e_ptpsq *ptpsq, int tc) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); - if (err) - return err; - - err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *txqsq) { - u32 sq_stride, sq_sz; - bool real_time; - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); - if (err) - return err; - - real_time = mlx5_is_real_time_sq(txqsq->mdev); - sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); - sq_stride = MLX5_SEND_WQE_BB; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? 
"RT" : "FRC"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + bool real_time = mlx5_is_real_time_sq(txqsq->mdev); + u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); + u32 sq_stride = MLX5_SEND_WQE_BB; + + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); + devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); + devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); + devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg, struct mlx5e_ptpsq *ptpsq) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { @@ -394,39 +295,20 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5e_ptpsq *generic_ptpsq; - int err; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto out; generic_ptpsq = &ptp_ch->ptpsq[0]; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); + mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); out: - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, @@ -436,20 +318,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int i, tc, err = 0; + int i, tc; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); - if (err) - goto unlock; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); - if (err) - goto unlock; + mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -457,31 
+334,23 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; - err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); - if (err) - goto unlock; + mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); } } if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto close_sqs_nest; - for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { - err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, - &ptp_ch->ptpsq[tc], - tc); - if (err) - goto unlock; - } + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) + mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, + &ptp_ch->ptpsq[tc], + tc); close_sqs_nest: - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - goto unlock; - + devlink_fmsg_arr_pair_nest_end(fmsg); unlock: mutex_unlock(&priv->state_lock); - return err; + return 0; } static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -489,60 +358,33 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms { struct mlx5_rsc_key key = {}; struct mlx5e_txqsq *sq = ctx; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = sq->sqn; key.num_of_obj1 = 1; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); key.rsc = MLX5_SGMT_TYPE_SND_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -567,28 +409,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, { struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; - int i, tc, err; + int i, tc; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = 
mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -596,9 +427,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; - err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); } } @@ -606,13 +435,12 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; - err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); } } - return devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c index b915fb29dd..7b8ff7a710 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c @@ -9,7 +9,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, { unsigned int i; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + for (i = 0; i < indir->actual_table_size; i++) indir->table[i] = i % num_channels; } @@ -45,9 +45,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, } int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - bool indir_enabled, u32 init_rqn) + bool indir_enabled, u32 init_rqn, u32 indir_table_size) { - u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1; + u16 max_size = indir_enabled ? 
indir_table_size : 1; return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1); } @@ -68,11 +68,11 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns { unsigned int i; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + for (i = 0; i < indir->actual_table_size; i++) { unsigned int ix = i; if (hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE)); + ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size)); ix = indir->table[ix]; @@ -94,7 +94,7 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, u32 *rss_rqns; int err; - rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL); if (!rss_rqns) return -ENOMEM; @@ -102,13 +102,25 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, if (err) goto out; - err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE); + err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, + indir->actual_table_size); out: kvfree(rss_rqns); return err; } +#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2 + +u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels) +{ + u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE, + roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR)); + u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size); + + return min_t(u32, rqt_size, max_cap_rqt_size); +} + void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt) { mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn); @@ -151,10 +163,10 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_ u32 *rss_rqns; int err; - if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE)) + if (WARN_ON(rqt->size != indir->max_table_size)) return -EINVAL; - rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL); if (!rss_rqns) return -ENOMEM; @@ -162,7 +174,7 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_ if (err) goto out; - err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE); + err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size); out: kvfree(rss_rqns); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h index 60c985a12f..77fba3ebd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h @@ -6,12 +6,14 @@ #include -#define MLX5E_INDIR_RQT_SIZE (1 << 8) +#define MLX5E_INDIR_MIN_RQT_SIZE (BIT(8)) struct mlx5_core_dev; struct mlx5e_rss_params_indir { - u32 table[MLX5E_INDIR_RQT_SIZE]; + u32 *table; + u32 actual_table_size; + u32 max_table_size; }; void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, @@ -24,7 +26,7 @@ struct mlx5e_rqt { }; int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - bool indir_enabled, u32 init_rqn); + bool indir_enabled, u32 init_rqn, u32 indir_table_size); int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, u32 *rqns, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir); @@ -35,6 +37,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt) return rqt->rqtn; } +u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels); int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn); int 
mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 7f93426b88..c1545a2e8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -81,14 +81,75 @@ struct mlx5e_rss { refcount_t refcnt; }; -struct mlx5e_rss *mlx5e_rss_alloc(void) +void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels) { - return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL); + rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels); } -void mlx5e_rss_free(struct mlx5e_rss *rss) +int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev, + u32 actual_table_size, u32 max_table_size) { + indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL); + if (!indir->table) + return -ENOMEM; + + indir->max_table_size = max_table_size; + indir->actual_table_size = actual_table_size; + + return 0; +} + +void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir) +{ + kvfree(indir->table); +} + +static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from) +{ + u32 *dst_indir_table; + + if (to->indir.actual_table_size != from->indir.actual_table_size || + to->indir.max_table_size != from->indir.max_table_size) { + mlx5e_rss_warn(to->mdev, + "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n", + from->indir.actual_table_size, from->indir.max_table_size, + to->indir.actual_table_size, to->indir.max_table_size); + return -EINVAL; + } + + dst_indir_table = to->indir.table; + *to = *from; + to->indir.table = dst_indir_table; + memcpy(to->indir.table, from->indir.table, + from->indir.actual_table_size * sizeof(*from->indir.table)); + return 0; +} + +static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from) +{ + struct mlx5e_rss *rss; + int err; + + rss = kvzalloc(sizeof(*rss), GFP_KERNEL); + if (!rss) + return ERR_PTR(-ENOMEM); + + err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size, + from->indir.max_table_size); + if (err) + goto err_free_rss; + + err = mlx5e_rss_copy(rss, from); + if (err) + goto err_free_indir; + + return rss; + +err_free_indir: + mlx5e_rss_params_indir_cleanup(&rss->indir); +err_free_rss: kvfree(rss); + return ERR_PTR(err); } static void mlx5e_rss_params_init(struct mlx5e_rss *rss) @@ -282,28 +343,43 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss) return retval; } -int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn) +static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss) { - rss->mdev = mdev; - rss->inner_ft_support = inner_ft_support; - rss->drop_rqn = drop_rqn; - mlx5e_rss_params_init(rss); refcount_set(&rss->refcnt, 1); - return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn); + return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true, + rss->drop_rqn, rss->indir.max_table_size); } -int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_packet_merge_param *init_pkt_merge_param) +struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + enum mlx5e_rss_init_type type, 
unsigned int nch, + unsigned int max_nch) { + struct mlx5e_rss *rss; int err; - err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn); + rss = kvzalloc(sizeof(*rss), GFP_KERNEL); + if (!rss) + return ERR_PTR(-ENOMEM); + + err = mlx5e_rss_params_indir_init(&rss->indir, mdev, + mlx5e_rqt_size(mdev, nch), + mlx5e_rqt_size(mdev, max_nch)); + if (err) + goto err_free_rss; + + rss->mdev = mdev; + rss->inner_ft_support = inner_ft_support; + rss->drop_rqn = drop_rqn; + + err = mlx5e_rss_init_no_tirs(rss); if (err) - goto err_out; + goto err_free_indir; + + if (type == MLX5E_RSS_INIT_NO_TIRS) + goto out; err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false); if (err) @@ -315,14 +391,18 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, goto err_destroy_tirs; } - return 0; +out: + return rss; err_destroy_tirs: mlx5e_rss_destroy_tirs(rss, false); err_destroy_rqt: mlx5e_rqt_destroy(&rss->rqt); -err_out: - return err; +err_free_indir: + mlx5e_rss_params_indir_cleanup(&rss->indir); +err_free_rss: + kvfree(rss); + return ERR_PTR(err); } int mlx5e_rss_cleanup(struct mlx5e_rss *rss) @@ -336,6 +416,8 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss) mlx5e_rss_destroy_tirs(rss, true); mlx5e_rqt_destroy(&rss->rqt); + mlx5e_rss_params_indir_cleanup(&rss->indir); + kvfree(rss); return 0; } @@ -470,11 +552,9 @@ inner_tir: int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) { - unsigned int i; - if (indir) - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - indir[i] = rss->indir.table[i]; + memcpy(indir, rss->indir.table, + rss->indir.actual_table_size * sizeof(*rss->indir.table)); if (key) memcpy(key, rss->hash.toeplitz_hash_key, @@ -495,11 +575,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, struct mlx5e_rss *old_rss; int err = 0; - old_rss = mlx5e_rss_alloc(); - if (!old_rss) - return -ENOMEM; - - *old_rss = *rss; + old_rss = mlx5e_rss_init_copy(rss); + if (IS_ERR(old_rss)) + return PTR_ERR(old_rss); if (hfunc && *hfunc != rss->hash.hfunc) { switch (*hfunc) { @@ -523,18 +601,16 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, } if (indir) { - unsigned int i; - changed_indir = true; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - rss->indir.table[i] = indir[i]; + memcpy(rss->indir.table, indir, + rss->indir.actual_table_size * sizeof(*rss->indir.table)); } if (changed_indir && rss->enabled) { err = mlx5e_rss_apply(rss, rqns, num_rqns); if (err) { - *rss = *old_rss; + mlx5e_rss_copy(rss, old_rss); goto out; } } @@ -543,7 +619,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, mlx5e_rss_update_tirs(rss); out: - mlx5e_rss_free(old_rss); + mlx5e_rss_params_indir_cleanup(&old_rss->indir); + kvfree(old_rss); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index c6b2164163..d1d0bc350e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -8,18 +8,24 @@ #include "tir.h" #include "fs.h" +enum mlx5e_rss_init_type { + MLX5E_RSS_INIT_NO_TIRS = 0, + MLX5E_RSS_INIT_TIRS +}; + struct mlx5e_rss_params_traffic_type mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt); struct mlx5e_rss; -struct mlx5e_rss *mlx5e_rss_alloc(void); -void mlx5e_rss_free(struct mlx5e_rss *rss); -int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_packet_merge_param *init_pkt_merge_param); -int 
mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn); +int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev, + u32 actual_table_size, u32 max_table_size); +void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir); +void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels); +struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + enum mlx5e_rss_init_type type, unsigned int nch, + unsigned int max_nch); int mlx5e_rss_cleanup(struct mlx5e_rss *rss); void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 56e6b8c750..b23e224e37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -18,7 +18,7 @@ struct mlx5e_rx_res { struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; - u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; + u32 *rss_rqns; unsigned int rss_nch; struct { @@ -34,41 +34,42 @@ struct mlx5e_rx_res { /* API for rx_res_rss_* */ +void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch) +{ + int i; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + if (res->rss[i]) + mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch); + } +} + static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_rss *rss; - int err; if (WARN_ON(res->rss[0])) return -EINVAL; - rss = mlx5e_rss_alloc(); - if (!rss) - return -ENOMEM; - - err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, - &res->pkt_merge_param); - if (err) - goto err_rss_free; + rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn, + &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch); + if (IS_ERR(rss)) + return PTR_ERR(rss); mlx5e_rss_set_indir_uniform(rss, init_nch); res->rss[0] = rss; return 0; - -err_rss_free: - mlx5e_rss_free(rss); - return err; } int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_rss *rss; - int err, i; + int i; for (i = 1; i < MLX5E_MAX_NUM_RSS; i++) if (!res->rss[i]) @@ -77,13 +78,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i if (i == MLX5E_MAX_NUM_RSS) return -ENOSPC; - rss = mlx5e_rss_alloc(); - if (!rss) - return -ENOMEM; - - err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn); - if (err) - goto err_rss_free; + rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn, + &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch, + res->max_nch); + if (IS_ERR(rss)) + return PTR_ERR(rss); mlx5e_rss_set_indir_uniform(rss, init_nch); if (res->rss_active) @@ -93,10 +92,6 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i *rss_idx = i; return 0; - -err_rss_free: - mlx5e_rss_free(rss); - return err; } static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) @@ -108,7 +103,6 @@ static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) if (err) return err; - mlx5e_rss_free(rss); res->rss[rss_idx] = NULL; return 0; @@ -284,9 +278,27 @@ struct mlx5e_rss 
*mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx) /* End of API rx_res_rss_* */ -struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) +static void mlx5e_rx_res_free(struct mlx5e_rx_res *res) { - return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL); + kvfree(res->rss_rqns); + kvfree(res); +} + +static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch) +{ + struct mlx5e_rx_res *rx_res; + + rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL); + if (!rx_res) + return NULL; + + rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL); + if (!rx_res->rss_rqns) { + kvfree(rx_res); + return NULL; + } + + return rx_res; } static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) @@ -308,7 +320,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) for (ix = 0; ix < res->max_nch; ix++) { err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt, - res->mdev, false, res->drop_rqn); + res->mdev, false, res->drop_rqn, + mlx5e_rqt_size(res->mdev, res->max_nch)); if (err) { mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n", err, ix); @@ -362,7 +375,8 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) if (!builder) return -ENOMEM; - err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn); + err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn, + mlx5e_rqt_size(res->mdev, res->max_nch)); if (err) goto out; @@ -404,13 +418,19 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res) mlx5e_rqt_destroy(&res->ptp.rqt); } -int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, - enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, - unsigned int init_nch) +struct mlx5e_rx_res * +mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, + unsigned int max_nch, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + unsigned int init_nch) { + struct mlx5e_rx_res *res; int err; + res = mlx5e_rx_res_alloc(mdev, max_nch); + if (!res) + return ERR_PTR(-ENOMEM); + res->mdev = mdev; res->features = features; res->max_nch = max_nch; @@ -421,7 +441,7 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, err = mlx5e_rx_res_rss_init_def(res, init_nch); if (err) - goto err_out; + goto err_rx_res_free; err = mlx5e_rx_res_channels_init(res); if (err) @@ -431,14 +451,15 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, if (err) goto err_channels_destroy; - return 0; + return res; err_channels_destroy: mlx5e_rx_res_channels_destroy(res); err_rss_destroy: __mlx5e_rx_res_rss_destroy(res, 0); -err_out: - return err; +err_rx_res_free: + mlx5e_rx_res_free(res); + return ERR_PTR(err); } void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) @@ -446,11 +467,7 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) mlx5e_rx_res_ptp_destroy(res); mlx5e_rx_res_channels_destroy(res); mlx5e_rx_res_rss_destroy_all(res); -} - -void mlx5e_rx_res_free(struct mlx5e_rx_res *res) -{ - kvfree(res); + mlx5e_rx_res_free(res); } u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 580fe8bc3c..82aaba8a82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -21,13 
+21,12 @@ enum mlx5e_rx_res_features { }; /* Setup */ -struct mlx5e_rx_res *mlx5e_rx_res_alloc(void); -int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, - enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, - unsigned int init_nch); +struct mlx5e_rx_res * +mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, + unsigned int max_nch, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + unsigned int init_nch); void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res); -void mlx5e_rx_res_free(struct mlx5e_rx_res *res); /* TIRN getters for flow steering */ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); @@ -60,6 +59,7 @@ int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx); int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res); int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss); struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); +void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch); /* Workaround for hairpin */ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 00a04fdd75..8dfb57f712 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -302,6 +302,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -313,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -407,6 +408,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -418,8 +420,8 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -570,6 +572,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -581,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = 
e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -674,6 +677,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -685,8 +689,8 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index b723ff5e52..13c7ed1bb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -895,7 +895,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) mlx5e_xmit_xdp_doorbell(xdpsq); if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) { - xdp_do_flush_map(); + xdp_do_flush(); __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index e2ffc572de..05612d9c60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -56,7 +56,7 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x) return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle; } -static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work) +static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work) { struct mlx5e_ipsec_dwork *dwork = container_of(_work, struct mlx5e_ipsec_dwork, dwork.work); @@ -520,9 +520,15 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, return -EINVAL; } - if (x->lft.hard_byte_limit != XFRM_INF || - x->lft.soft_byte_limit != XFRM_INF) { - NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes"); + if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit && + x->lft.hard_byte_limit != XFRM_INF) { + /* XFRM stack doesn't prevent such configuration :(. 
+		NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
+		return -EINVAL;
+	}
+
+	if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
+		NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
 		return -EINVAL;
 	}
 
@@ -658,11 +664,10 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
 		return 0;
 
-	if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
-		return 0;
-
 	if (x->lft.soft_packet_limit == XFRM_INF &&
-	    x->lft.hard_packet_limit == XFRM_INF)
+	    x->lft.hard_packet_limit == XFRM_INF &&
+	    x->lft.soft_byte_limit == XFRM_INF &&
+	    x->lft.hard_byte_limit == XFRM_INF)
 		return 0;
 
 	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
@@ -670,7 +675,7 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
 		return -ENOMEM;
 
 	dwork->sa_entry = sa_entry;
-	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
 	sa_entry->dwork = dwork;
 	return 0;
 }
@@ -884,6 +889,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 
 	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
 	ipsec->mdev = priv->mdev;
+	init_completion(&ipsec->comp);
 	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
 				    priv->netdev->name);
 	if (!ipsec->wq)
@@ -904,7 +910,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 	}
 
 	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
-	ret = mlx5e_accel_ipsec_fs_init(ipsec);
+	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
 	if (ret)
 		goto err_fs_init;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 9e7c42c2f7..adaea34931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -38,6 +38,7 @@
 #include
 #include
 #include "lib/aso.h"
+#include "lib/devcom.h"
 
 #define MLX5E_IPSEC_SADB_RX_BITS 10
 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -188,11 +189,19 @@ struct mlx5e_ipsec_ft {
 	u32 refcnt;
 };
 
+struct mlx5e_ipsec_drop {
+	struct mlx5_flow_handle *rule;
+	struct mlx5_fc *fc;
+};
+
 struct mlx5e_ipsec_rule {
 	struct mlx5_flow_handle *rule;
 	struct mlx5_modify_hdr *modify_hdr;
 	struct mlx5_pkt_reformat *pkt_reformat;
 	struct mlx5_fc *fc;
+	struct mlx5e_ipsec_drop replay;
+	struct mlx5e_ipsec_drop auth;
+	struct mlx5e_ipsec_drop trailer;
 };
 
 struct mlx5e_ipsec_miss {
@@ -200,19 +209,6 @@ struct mlx5e_ipsec_miss {
 	struct mlx5_flow_handle *rule;
 };
 
-struct mlx5e_ipsec_rx {
-	struct mlx5e_ipsec_ft ft;
-	struct mlx5e_ipsec_miss pol;
-	struct mlx5e_ipsec_miss sa;
-	struct mlx5e_ipsec_rule status;
-	struct mlx5e_ipsec_miss status_drop;
-	struct mlx5_fc *status_drop_cnt;
-	struct mlx5e_ipsec_fc *fc;
-	struct mlx5_fs_chains *chains;
-	u8 allow_tunnel_mode : 1;
-	struct xarray ipsec_obj_id_map;
-};
-
 struct mlx5e_ipsec_tx_create_attr {
 	int prio;
 	int pol_level;
@@ -221,12 +217,20 @@ struct mlx5e_ipsec_tx_create_attr {
 	enum mlx5_flow_namespace_type chains_ns;
 };
 
+struct mlx5e_ipsec_mpv_work {
+	int event;
+	struct work_struct work;
+	struct mlx5e_priv *slave_priv;
+	struct mlx5e_priv *master_priv;
+};
+
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
 	struct xarray sadb;
 	struct mlx5e_ipsec_sw_stats sw_stats;
 	struct mlx5e_ipsec_hw_stats hw_stats;
 	struct workqueue_struct *wq;
+	struct completion comp;
 	struct mlx5e_flow_steering *fs;
 	struct mlx5e_ipsec_rx *rx_ipv4;
 	struct mlx5e_ipsec_rx *rx_ipv6;
@@ -238,6 +242,8 @@ struct mlx5e_ipsec {
 	struct notifier_block netevent_nb;
 	struct
mlx5_ipsec_fs *roce; u8 is_uplink_rep: 1; + struct mlx5e_ipsec_mpv_work mpv_work; + struct xarray ipsec_obj_id_map; }; struct mlx5e_ipsec_esn_state { @@ -302,7 +308,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry); int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry); @@ -328,6 +334,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs); +void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv); +void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event); + static inline struct mlx5_core_dev * mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) { @@ -363,6 +373,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } + +static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv) +{ +} + +static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) +{ +} #endif #endif /* __MLX5E_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 81e6aa6434..41a2543a52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -32,6 +32,22 @@ struct mlx5e_ipsec_tx { u8 allow_tunnel_mode : 1; }; +struct mlx5e_ipsec_status_checks { + struct mlx5_flow_group *drop_all_group; + struct mlx5e_ipsec_drop all; +}; + +struct mlx5e_ipsec_rx { + struct mlx5e_ipsec_ft ft; + struct mlx5e_ipsec_miss pol; + struct mlx5e_ipsec_miss sa; + struct mlx5e_ipsec_rule status; + struct mlx5e_ipsec_status_checks status_drops; + struct mlx5e_ipsec_fc *fc; + struct mlx5_fs_chains *chains; + u8 allow_tunnel_mode : 1; +}; + /* IPsec RX flow steering */ static enum mlx5_traffic_types family2tt(u32 family) { @@ -131,9 +147,9 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx) { - mlx5_del_flow_rules(rx->status_drop.rule); - mlx5_destroy_flow_group(rx->status_drop.group); - mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt); + mlx5_del_flow_rules(rx->status_drops.all.rule); + mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc); + mlx5_destroy_flow_group(rx->status_drops.drop_all_group); } static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, @@ -149,8 +165,149 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, #endif } -static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx) +static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct 
mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + sa_entry->ipsec_rule.auth.fc = flow_counter; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + sa_entry->ipsec_rule.auth.rule = rule; + + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt_2; + } + sa_entry->ipsec_rule.trailer.fc = flow_counter; + + dest.counter_id = mlx5_fc_id(flow_counter); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule_2; + } + sa_entry->ipsec_rule.trailer.rule = rule; + + kvfree(spec); + return 0; + +err_rule_2: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc); +err_cnt_2: + mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule); +err_rule: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc); +err_cnt: + kvfree(spec); + return err; +} + +static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 
misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + + sa_entry->ipsec_rule.replay.rule = rule; + sa_entry->ipsec_rule.replay.fc = flow_counter; + + kvfree(spec); + return 0; + +err_rule: + mlx5_fc_destroy(mdev, flow_counter); +err_cnt: + kvfree(spec); + return err; +} + +static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table *ft = rx->ft.status; @@ -202,9 +359,9 @@ static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec, goto err_rule; } - rx->status_drop.group = g; - rx->status_drop.rule = rule; - rx->status_drop_cnt = flow_counter; + rx->status_drops.drop_all_group = g; + rx->status_drops.all.rule = rule; + rx->status_drops.all.fc = flow_counter; kvfree(flow_group_in); kvfree(spec); @@ -235,8 +392,12 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_4); MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 0); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_4, 0); if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; @@ -273,7 +434,7 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, { int err; - err = ipsec_rx_status_drop_create(ipsec, rx); + err = ipsec_rx_status_drop_all_create(ipsec, rx); if (err) return err; @@ -332,6 +493,83 @@ out: return err; } +static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + + mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family, + MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO); + + new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); +} + +static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); + 
mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev); +} + +static void ipsec_mpv_work_handler(struct work_struct *_work) +{ + struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work); + struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec; + + switch (work->event) { + case MPV_DEVCOM_IPSEC_MASTER_UP: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol, + true); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + case MPV_DEVCOM_IPSEC_MASTER_DOWN: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + } + + complete(&work->master_priv->ipsec->comp); +} + static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family) { struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); @@ -362,7 +600,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, mlx5_ipsec_rx_status_destroy(ipsec, rx); mlx5_destroy_flow_table(rx->ft.status); - mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); } static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, @@ -440,7 +678,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, if (err) return err; - ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0); + ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft_status; @@ -517,7 +755,7 @@ err_fs_ft: err_add: mlx5_destroy_flow_table(rx->ft.status); err_fs_ft_status: - mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); return err; } @@ -657,7 +895,7 @@ err_rule: static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, struct mlx5_ipsec_fs *roce) { - mlx5_ipsec_fs_roce_tx_destroy(roce); + mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev); if (tx->chains) { ipsec_chains_destroy(tx->chains); } else { @@ -760,7 +998,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, } connect_roce: - err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol); + err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false); if (err) goto err_roce; return 0; @@ -1079,29 +1317,48 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 struct mlx5_flow_act *flow_act) { enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir); - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_core_dev *mdev = ipsec->mdev; struct mlx5_modify_hdr 
*modify_hdr; + u8 num_of_actions = 1; - MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET); switch (dir) { case XFRM_DEV_OFFLOAD_IN: - MLX5_SET(set_action_in, action, field, + MLX5_SET(set_action_in, action[0], field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + + num_of_actions++; + MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2); + MLX5_SET(set_action_in, action[1], data, val); + MLX5_SET(set_action_in, action[1], offset, 0); + MLX5_SET(set_action_in, action[1], length, 32); + + if (type == XFRM_DEV_OFFLOAD_CRYPTO) { + num_of_actions++; + MLX5_SET(set_action_in, action[2], action_type, + MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action[2], field, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4); + MLX5_SET(set_action_in, action[2], data, 0); + MLX5_SET(set_action_in, action[2], offset, 0); + MLX5_SET(set_action_in, action[2], length, 32); + } break; case XFRM_DEV_OFFLOAD_OUT: - MLX5_SET(set_action_in, action, field, + MLX5_SET(set_action_in, action[0], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_4); break; default: return -EINVAL; } - MLX5_SET(set_action_in, action, data, val); - MLX5_SET(set_action_in, action, offset, 0); - MLX5_SET(set_action_in, action, length, 32); + MLX5_SET(set_action_in, action[0], data, val); + MLX5_SET(set_action_in, action[0], offset, 0); + MLX5_SET(set_action_in, action[0], length, 32); - modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action); + modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action); if (IS_ERR(modify_hdr)) { mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n", PTR_ERR(modify_hdr)); @@ -1354,15 +1611,17 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) setup_fte_no_frags(spec); setup_fte_upper_proto_match(spec, &attrs->upspec); - if (rx != ipsec->rx_esw) - err = setup_modify_header(ipsec, attrs->type, - sa_entry->ipsec_obj_id | BIT(31), - XFRM_DEV_OFFLOAD_IN, &flow_act); - else - err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act); + if (!attrs->drop) { + if (rx != ipsec->rx_esw) + err = setup_modify_header(ipsec, attrs->type, + sa_entry->ipsec_obj_id | BIT(31), + XFRM_DEV_OFFLOAD_IN, &flow_act); + else + err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act); - if (err) - goto err_mod_header; + if (err) + goto err_mod_header; + } switch (attrs->type) { case XFRM_DEV_OFFLOAD_PACKET: @@ -1398,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err); goto err_add_flow; } + if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) + err = rx_add_rule_drop_replay(sa_entry, rx); + if (err) + goto err_add_replay; + + err = rx_add_rule_drop_auth_trailer(sa_entry, rx); + if (err) + goto err_drop_reason; + kvfree(spec); sa_entry->ipsec_rule.rule = rule; @@ -1406,13 +1674,21 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat; return 0; +err_drop_reason: + if (sa_entry->ipsec_rule.replay.rule) { + mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule); + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc); + } +err_add_replay: + mlx5_del_flow_rules(rule); err_add_flow: mlx5_fc_destroy(mdev, counter); err_add_cnt: if (flow_act.pkt_reformat) mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat); err_pkt_reformat: - mlx5_modify_header_dealloc(mdev, 
flow_act.modify_hdr); + if (flow_act.modify_hdr) + mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr); err_mod_header: kvfree(spec); err_alloc: @@ -1913,7 +2189,19 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry) return; } - mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); + if (ipsec_rule->modify_hdr) + mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr); + + mlx5_del_flow_rules(ipsec_rule->trailer.rule); + mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc); + + mlx5_del_flow_rules(ipsec_rule->auth.rule); + mlx5_fc_destroy(mdev, ipsec_rule->auth.fc); + + if (ipsec_rule->replay.rule) { + mlx5_del_flow_rules(ipsec_rule->replay.rule); + mlx5_fc_destroy(mdev, ipsec_rule->replay.fc); + } mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry); rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type); } @@ -1984,7 +2272,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) kfree(ipsec->rx_ipv6); if (ipsec->is_uplink_rep) { - xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map); + xa_destroy(&ipsec->ipsec_obj_id_map); mutex_destroy(&ipsec->tx_esw->ft.mutex); WARN_ON(ipsec->tx_esw->ft.refcnt); @@ -1996,7 +2284,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) } } -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, + struct mlx5_devcom_comp_dev **devcom) { struct mlx5_core_dev *mdev = ipsec->mdev; struct mlx5_flow_namespace *ns, *ns_esw; @@ -2046,9 +2335,11 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) mutex_init(&ipsec->tx_esw->ft.mutex); mutex_init(&ipsec->rx_esw->ft.mutex); ipsec->tx_esw->ns = ns_esw; - xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); + xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1); } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { - ipsec->roce = mlx5_ipsec_fs_roce_init(mdev); + ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom); + } else { + mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n"); } return 0; @@ -2095,3 +2386,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry) return rx->allow_tunnel_mode; } + +void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv) +{ + struct mlx5e_ipsec_mpv_work *work; + + reinit_completion(&master_priv->ipsec->comp); + + if (!slave_priv->ipsec) { + complete(&master_priv->ipsec->comp); + return; + } + + work = &slave_priv->ipsec->mpv_work; + + INIT_WORK(&work->work, ipsec_mpv_work_handler); + work->event = event; + work->slave_priv = slave_priv; + work->master_priv = master_priv; + queue_work(slave_priv->ipsec->wq, &work->work); +} + +void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) +{ + if (!priv->ipsec) + return; /* IPsec not supported */ + + mlx5_devcom_send_event(priv->devcom, event, event, priv); + wait_for_completion(&priv->ipsec->comp); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index ce29e31721..6e00afe467 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -5,6 +5,7 @@ #include "en.h" #include "ipsec.h" #include "lib/crypto.h" +#include "lib/ipsec_fs_roce.h" #include "fs_core.h" #include "eswitch.h" @@ -68,7 +69,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) caps |= MLX5_IPSEC_CAP_ESPINUDP; } - if (mlx5_get_roce_state(mdev) 
&& + if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX) caps |= MLX5_IPSEC_CAP_ROCE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 9ee014a8ad..2ed99772f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -54,7 +54,6 @@ struct mlx5e_accel_tx_ipsec_state { #ifdef CONFIG_MLX5_EN_IPSEC -void mlx5e_ipsec_inverse_table_init(void); void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_offload *xo); void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 38263d5c98..c7c1b667b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1252,7 +1252,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv) { - return MLX5E_INDIR_RQT_SIZE; + return mlx5e_rqt_size(priv->mdev, priv->channels.params.num_channels); } static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 934b0d5ce1..777d311d44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1283,9 +1283,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs, mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params); fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev, &ttc_params); - if (IS_ERR(fs->inner_ttc)) - return PTR_ERR(fs->inner_ttc); - return 0; + return PTR_ERR_OR_ZERO(fs->inner_ttc); } int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs, @@ -1295,9 +1293,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs, mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true); fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params); - if (IS_ERR(fs->ttc)) - return PTR_ERR(fs->ttc); - return 0; + return PTR_ERR_OR_ZERO(fs->ttc); } int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c3961c2bbc..0c87ddb8a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -69,6 +69,7 @@ #include "en/htb.h" #include "qos.h" #include "en/trap.h" +#include "lib/devcom.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) @@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) mlx5_notifier_unregister(priv->mdev, &priv->events_nb); } +static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data) +{ + struct mlx5e_priv *slave_priv = my_data; + + switch (event) { + case MPV_DEVCOM_MASTER_UP: + mlx5_devcom_comp_set_ready(slave_priv->devcom, true); + break; + case MPV_DEVCOM_MASTER_DOWN: + /* no need for comp set ready false since we unregister after + * and it hurts cleanup flow. 
+ */ + break; + case MPV_DEVCOM_IPSEC_MASTER_UP: + case MPV_DEVCOM_IPSEC_MASTER_DOWN: + mlx5e_ipsec_handle_mpv_event(event, my_data, event_data); + break; + } + + return 0; +} + +static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data) +{ + priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc, + MLX5_DEVCOM_MPV, + *data, + mlx5e_devcom_event_mpv, + priv); + if (IS_ERR_OR_NULL(priv->devcom)) + return -EOPNOTSUPP; + + if (mlx5_core_is_mp_master(priv->mdev)) { + mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP, + MPV_DEVCOM_MASTER_UP, priv); + mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP); + } + + return 0; +} + +static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv) +{ + if (IS_ERR_OR_NULL(priv->devcom)) + return; + + if (mlx5_core_is_mp_master(priv->mdev)) { + mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN, + MPV_DEVCOM_MASTER_DOWN, priv); + mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN); + } + + mlx5_devcom_unregister_component(priv->devcom); +} + static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) { struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); @@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void * return NOTIFY_BAD; } break; + case MLX5_DRIVER_EVENT_AFFILIATION_DONE: + if (mlx5e_devcom_init_mpv(priv, data)) + return NOTIFY_BAD; + break; + case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED: + mlx5e_devcom_cleanup_mpv(priv); + break; default: return NOTIFY_DONE; } @@ -834,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, struct page_pool_params pp_params = { 0 }; pp_params.order = 0; - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = pool_size; pp_params.nid = node; pp_params.dev = rq->pdev; @@ -2886,8 +2949,12 @@ static int mlx5e_num_channels_changed(struct mlx5e_priv *priv) mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); /* This function may be called on attach, before priv->rx_res is created. 
*/ - if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res) - mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count); + if (priv->rx_res) { + mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count); + + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count); + } return 0; } @@ -5347,10 +5414,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) enum mlx5e_rx_res_features features; int err; - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) - return -ENOMEM; - mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -5362,12 +5425,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) features = MLX5E_RX_RES_FEATURE_PTP; if (mlx5_tunnel_inner_ft_supported(mdev)) features |= MLX5E_RX_RES_FEATURE_INNER_FT; - err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, - priv->max_nch, priv->drop_rq.rqn, - &priv->channels.params.packet_merge, - priv->channels.params.num_channels); - if (err) + + priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, + priv->channels.params.num_channels); + if (IS_ERR(priv->rx_res)) { + err = PTR_ERR(priv->rx_res); + priv->rx_res = NULL; + mlx5_core_err(mdev, "create rx resources failed, %d\n", err); goto err_close_drop_rq; + } err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile, priv->netdev); @@ -5397,12 +5464,11 @@ err_destroy_flow_steering: priv->profile); err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = NULL; err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; return err; } @@ -5413,10 +5479,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), priv->profile); mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = NULL; mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; } static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 751d3ffcd2..e92d4f8359 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1010,26 +1010,22 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) { - err = -ENOMEM; - goto err_free_fs; - } - mlx5e_fs_init_l2_addr(priv->fs, priv->netdev); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_rx_res_free; + goto err_free_fs; } - err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, - &priv->channels.params.packet_merge, - priv->channels.params.num_channels); - if (err) + priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, + priv->channels.params.num_channels); + if (IS_ERR(priv->rx_res)) { + err = PTR_ERR(priv->rx_res); + mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err); goto err_close_drop_rq; + } err = mlx5e_create_rep_ttc_table(priv); if (err) @@ -1053,11 +1049,9 @@ err_destroy_ttc_table: mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false)); 
err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); -err_rx_res_free: - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; err_free_fs: mlx5e_fs_cleanup(priv->fs); priv->fs = NULL; @@ -1071,9 +1065,8 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) mlx5e_destroy_rep_root_ft(priv); mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false)); mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; } static void mlx5e_rep_mpesw_work(struct work_struct *work) @@ -1367,8 +1360,9 @@ mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter, struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter); struct mlx5_eswitch_rep *rep = rpriv->rep; - return mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, - rep->vport, true); + mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, rep->vport, + true); + return 0; } static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index dc9b157a44..404dd1d9b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -756,19 +756,21 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) { struct mlx5e_priv *priv = hp->func_priv; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_rss_params_indir *indir; + struct mlx5e_rss_params_indir indir; int err; - indir = kvmalloc(sizeof(*indir), GFP_KERNEL); - if (!indir) - return -ENOMEM; + err = mlx5e_rss_params_indir_init(&indir, mdev, + mlx5e_rqt_size(mdev, hp->num_channels), + mlx5e_rqt_size(mdev, hp->num_channels)); + if (err) + return err; - mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels); + mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels); err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels, mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc, - indir); + &indir); - kvfree(indir); + mlx5e_rss_params_indir_cleanup(&indir); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index d5d33c3b3a..190f10aba1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -50,7 +50,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, u32 mapped_id; int err; - err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id, + err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id, xa_mk_value(sa_entry->ipsec_obj_id), XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0); if (err) @@ -81,7 +81,7 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, return 0; err_header_alloc: - xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id); + xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id); return err; } @@ -90,7 +90,7 @@ void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) struct mlx5e_ipsec *ipsec = sa_entry->ipsec; if (sa_entry->rx_mapped_id) - xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, + xa_erase_bh(&ipsec->ipsec_obj_id_map, sa_entry->rx_mapped_id); } @@ -100,7 +100,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, struct mlx5e_ipsec *ipsec = 
priv->ipsec; void *val; - val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id); + val = xa_load(&ipsec->ipsec_obj_id_map, id); if (!val) return -ENOENT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 1887a24ee4..d2ebe56c39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ #include "eswitch.h" +#include "lib/mlx5.h" #include "esw/qos.h" #include "en/port.h" #define CREATE_TRACE_POINTS @@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo return err; } +static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) +{ + struct ethtool_link_ksettings lksettings; + struct net_device *slave, *master; + u32 speed = SPEED_UNKNOWN; + + /* Lock ensures a stable reference to master and slave netdevice + * while port speed of master is queried. + */ + ASSERT_RTNL(); + + slave = mlx5_uplink_netdev_get(mdev); + if (!slave) + goto out; + + master = netdev_master_upper_dev_get(slave); + if (master && !__ethtool_get_link_ksettings(master, &lksettings)) + speed = lksettings.base.speed; + +out: + return speed; +} + +static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, + bool hold_rtnl_lock, struct netlink_ext_ack *extack) +{ + int err; + + if (!mlx5_lag_is_active(mdev)) + goto skip_lag; + + if (hold_rtnl_lock) + rtnl_lock(); + + *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); + + if (hold_rtnl_lock) + rtnl_unlock(); + + if (*link_speed_max != (u32)SPEED_UNKNOWN) + return 0; + +skip_lag: + err = mlx5_port_max_linkspeed(mdev, link_speed_max); + if (err) + NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); + + return err; +} + +static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, + const char *name, u32 link_speed_max, + u64 value, struct netlink_ext_ack *extack) +{ + if (value > link_speed_max) { + pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", + name, value, link_speed_max); + NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); + return -EINVAL; + } + + return 0; +} + int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; + u32 link_speed_max; u32 bitmask; int err; @@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 if (IS_ERR(vport)) return PTR_ERR(vport); + if (rate_mbps) { + err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL); + if (err) + return err; + + err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police", + link_speed_max, rate_mbps, NULL); + if (err) + return err; + } + mutex_lock(&esw->state_lock); if (!vport->qos.enabled) { /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
*/ @@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char * u64 value; int err; - err = mlx5_port_max_linkspeed(mdev, &link_speed_max); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); - return err; - } - value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder); if (remainder) { pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n", @@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char * return -EINVAL; } - if (value > link_speed_max) { - pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", - name, value, link_speed_max); - NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); - return -EINVAL; - } + err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack); + if (err) + return err; + + err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack); + if (err) + return err; *rate = value; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 3ec892d51f..d91ea53eb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -441,8 +441,3 @@ int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int ev return blocking_notifier_call_chain(&events->sw_nh, event, data); } - -void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work) -{ - queue_work(dev->priv.events->wq, work); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a13b9c2bd1..e6bfa7e4f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -114,9 +114,9 @@ #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, - * IPsec RoCE policy + * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 9 +#define KERNEL_NIC_PRIO_NUM_LEVELS 11 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -137,7 +137,7 @@ #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define KERNEL_TX_IPSEC_NUM_PRIOS 1 -#define KERNEL_TX_IPSEC_NUM_LEVELS 3 +#define KERNEL_TX_IPSEC_NUM_LEVELS 4 #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_MACSEC_NUM_PRIOS 1 @@ -231,7 +231,7 @@ enum { }; #define RDMA_RX_IPSEC_NUM_PRIOS 1 -#define RDMA_RX_IPSEC_NUM_LEVELS 2 +#define RDMA_RX_IPSEC_NUM_LEVELS 4 #define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS) #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS @@ -288,7 +288,7 @@ enum { #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) -#define RDMA_TX_IPSEC_NUM_PRIOS 1 +#define RDMA_TX_IPSEC_NUM_PRIOS 2 #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 2fb2598b77..8ff6dc9bc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd) return 
"FFSER error"; case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR: return "High temperature"; + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR: + return "ICM fetch PCI data poisoned error"; default: return "unrecognized error"; } @@ -448,14 +450,15 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - u8 synd; - int err; + u8 synd = ioread8(&h->synd); - synd = ioread8(&h->synd); - err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); - if (err || !synd) - return err; - return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); + if (!synd) + return 0; + + devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); + devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); + + return 0; } struct mlx5_fw_reporter_ctx { @@ -463,94 +466,47 @@ struct mlx5_fw_reporter_ctx { int miss_counter; }; -static int +static void mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg, struct mlx5_fw_reporter_ctx *fw_reporter_ctx) { - int err; - - err = devlink_fmsg_u8_pair_put(fmsg, "syndrome", - fw_reporter_ctx->err_synd); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", - fw_reporter_ctx->miss_counter); - if (err) - return err; - return 0; + devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd); + devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter); } -static int +static void mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, struct devlink_fmsg *fmsg) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u8 rfr_severity; - int err; int i; if (!ioread8(&h->synd)) - return 0; - - err = devlink_fmsg_pair_nest_start(fmsg, "health buffer"); - if (err) - return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); - if (err) - return err; + return; - for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) { - err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", - ioread32be(&h->assert_exit_ptr)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra", - ioread32be(&h->assert_callra)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "health buffer"); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", + ioread32be(&h->assert_exit_ptr)); + devlink_fmsg_u32_pair_put(fmsg, "assert_callra", + ioread32be(&h->assert_callra)); + devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time)); + devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); rfr_severity = ioread8(&h->rfr_severity); - err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, 
"severity", mlx5_health_get_severity(rfr_severity)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index", - ioread8(&h->irisc_index)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", - ioread16be(&h->ext_synd)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", - ioread32be(&h->fw_ver)); - if (err) - return err; - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - return devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity)); + devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity)); + devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index)); + devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); + devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd)); + devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver)); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static int @@ -568,14 +524,11 @@ mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter, if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; - err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); - if (err) - return err; + mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); } - err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); - if (err) - return err; + mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); + return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg); } @@ -641,12 +594,10 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; - err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); - if (err) - goto free_data; + mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); } - err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); + devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); free_data: kvfree(cr_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index baa7ef8123..2bf77a5251 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -418,12 +418,6 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) return -ENOMEM; } - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) { - err = -ENOMEM; - goto err_free_fs; - } - mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -432,12 +426,13 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, - &priv->channels.params.packet_merge, - priv->channels.params.num_channels); - if (err) + priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, + priv->channels.params.num_channels); + if (IS_ERR(priv->rx_res)) { + err = PTR_ERR(priv->rx_res); goto err_close_drop_rq; + } err = mlx5i_create_flow_steering(priv); if (err) @@ -447,13 +442,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); - 
mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; -err_free_fs: mlx5e_fs_cleanup(priv->fs); return err; } @@ -462,10 +455,9 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; mlx5e_fs_cleanup(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index af3fac090b..d14459e5c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -943,6 +943,26 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } } +/* The last mdev to unregister will destroy the workqueue before removing the + * devcom component, and as all the mdevs use the same devcom component we are + * guaranteed that the devcom is valid while the calling work is running. + */ +struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev) +{ + struct mlx5_devcom_comp_dev *devcom = NULL; + int i; + + mutex_lock(&ldev->lock); + for (i = 0; i < ldev->ports; i++) { + if (ldev->pf[i].dev) { + devcom = ldev->pf[i].dev->priv.hca_devcom_comp; + break; + } + } + mutex_unlock(&ldev->lock); + return devcom; +} + static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) { queue_delayed_work(ldev->wq, &ldev->bond_work, delay); @@ -953,9 +973,14 @@ static void mlx5_do_bond_work(struct work_struct *work) struct delayed_work *delayed_work = to_delayed_work(work); struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, bond_work); + struct mlx5_devcom_comp_dev *devcom; int status; - status = mlx5_dev_list_trylock(); + devcom = mlx5_lag_get_devcom_comp(ldev); + if (!devcom) + return; + + status = mlx5_devcom_comp_trylock(devcom); if (!status) { mlx5_queue_bond_work(ldev, HZ); return; @@ -964,14 +989,14 @@ static void mlx5_do_bond_work(struct work_struct *work) mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); mlx5_queue_bond_work(ldev, HZ); return; } mlx5_do_bond(ldev); mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); } static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, @@ -1212,13 +1237,14 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, dev->priv.lag = NULL; } -/* Must be called with intf_mutex held */ +/* Must be called with HCA devcom component lock held */ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) { + struct mlx5_devcom_comp_dev *pos = NULL; struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; - tmp_dev = mlx5_get_next_phys_dev_lag(dev); + tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos); if (tmp_dev) ldev = mlx5_lag_dev(tmp_dev); @@ -1275,10 +1301,13 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) if (!mlx5_lag_is_supported(dev)) return; + if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp)) + return; + recheck: - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); err = __mlx5_lag_dev_add_mdev(dev); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); if (err) { msleep(100); @@ -1431,7 +1460,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) if (!ldev) return; - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); 
mutex_lock(&ldev->lock); ldev->mode_changes_in_progress++; @@ -1439,7 +1468,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) mlx5_disable_lag(ldev); mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } void mlx5_lag_enable_change(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 481e92f39f..50fcb1eee5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -112,6 +112,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev); void mlx5_lag_remove_devices(struct mlx5_lag *ldev); int mlx5_deactivate_lag(struct mlx5_lag *ldev); void mlx5_lag_add_devices(struct mlx5_lag *ldev); +struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev); static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 4bf1539152..82889f3050 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -65,12 +65,12 @@ err_metadata: return err; } -#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2 +#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4 static int enable_mpesw(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int err; + int i; if (ldev->mode != MLX5_LAG_MODE_NONE) return -EINVAL; @@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - err = mlx5_eswitch_reload_reps(dev0->priv.eswitch); - if (!err) - err = mlx5_eswitch_reload_reps(dev1->priv.eswitch); - if (err) - goto err_rescan_drivers; + for (i = 0; i < ldev->ports; i++) { + err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + if (err) + goto err_rescan_drivers; + } return 0; @@ -112,8 +112,8 @@ err_rescan_drivers: mlx5_deactivate_lag(ldev); err_add_devices: mlx5_lag_add_devices(ldev); - mlx5_eswitch_reload_reps(dev0->priv.eswitch); - mlx5_eswitch_reload_reps(dev1->priv.eswitch); + for (i = 0; i < ldev->ports; i++) + mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; } @@ -129,9 +129,14 @@ static void disable_mpesw(struct mlx5_lag *ldev) static void mlx5_mpesw_work(struct work_struct *work) { struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work); + struct mlx5_devcom_comp_dev *devcom; struct mlx5_lag *ldev = mpesww->lag; - mlx5_dev_list_lock(); + devcom = mlx5_lag_get_devcom_comp(ldev); + if (!devcom) + return; + + mlx5_devcom_comp_lock(devcom); mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { mpesww->result = -EAGAIN; @@ -144,7 +149,7 @@ static void mlx5_mpesw_work(struct work_struct *work) disable_mpesw(ldev); unlock: mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); complete(&mpesww->comp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 7d9bbb494d..101b3bb908 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -507,10 +507,7 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) mlx5_lag_set_outer_ttc_params(ldev, 
&ttc_params); port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params); - if (IS_ERR(port_sel->outer.ttc)) - return PTR_ERR(port_sel->outer.ttc); - - return 0; + return PTR_ERR_OR_ZERO(port_sel->outer.ttc); } static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) @@ -521,10 +518,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); - if (IS_ERR(port_sel->inner.ttc)) - return PTR_ERR(port_sel->inner.ttc); - - return 0; + return PTR_ERR_OR_ZERO(port_sel->inner.ttc); } int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index 00e67910e3..e8e50563e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -31,6 +31,7 @@ struct mlx5_devcom_comp { struct kref ref; bool ready; struct rw_semaphore sem; + struct lock_class_key lock_key; }; struct mlx5_devcom_comp_dev { @@ -119,6 +120,8 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler) comp->key = key; comp->handler = handler; init_rwsem(&comp->sem); + lockdep_register_key(&comp->lock_key); + lockdep_set_class(&comp->sem, &comp->lock_key); kref_init(&comp->ref); INIT_LIST_HEAD(&comp->comp_dev_list_head); @@ -133,6 +136,7 @@ mlx5_devcom_comp_release(struct kref *ref) mutex_lock(&comp_list_lock); list_del(&comp->comp_list); mutex_unlock(&comp_list_lock); + lockdep_unregister_key(&comp->lock_key); kfree(comp); } @@ -383,3 +387,24 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom, *pos = tmp; return data; } + +void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return; + down_write(&devcom->comp->sem); +} + +void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return; + up_write(&devcom->comp->sem); +} + +int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return 0; + return down_write_trylock(&devcom->comp->sem); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 8389ac0af7..fc23bbef87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -8,6 +8,8 @@ enum mlx5_devcom_component { MLX5_DEVCOM_ESW_OFFLOADS, + MLX5_DEVCOM_MPV, + MLX5_DEVCOM_HCA_PORTS, MLX5_DEVCOM_NUM_COMPONENTS, }; @@ -51,4 +53,8 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom, data; \ data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos)) +void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom); +void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom); +int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom); + #endif /* __LIB_MLX5_DEVCOM_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index 69a7545977..4b7f7131c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -85,7 +85,6 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn); struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev); void 
mlx5_cq_tasklet_cb(struct tasklet_struct *t); -struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq); void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index 6e3f178d6f..234cd00f71 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -2,8 +2,11 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ #include "fs_core.h" +#include "fs_cmd.h" +#include "en.h" #include "lib/ipsec_fs_roce.h" #include "mlx5_core.h" +#include struct mlx5_ipsec_miss { struct mlx5_flow_group *group; @@ -15,6 +18,12 @@ struct mlx5_ipsec_rx_roce { struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; struct mlx5_ipsec_miss roce_miss; + struct mlx5_flow_table *nic_master_ft; + struct mlx5_flow_group *nic_master_group; + struct mlx5_flow_handle *nic_master_rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_table *ft_rdma; struct mlx5_flow_namespace *ns_rdma; @@ -24,6 +33,9 @@ struct mlx5_ipsec_tx_roce { struct mlx5_flow_group *g; struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_namespace *ns; }; @@ -31,6 +43,7 @@ struct mlx5_ipsec_fs { struct mlx5_ipsec_rx_roce ipv4_rx; struct mlx5_ipsec_rx_roce ipv6_rx; struct mlx5_ipsec_tx_roce tx; + struct mlx5_devcom_comp_dev **devcom; }; static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, @@ -43,11 +56,83 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport); } +static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev) +{ + u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access); + u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported); + + if (!(obj_supp & + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS)) + return false; + + if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) + return false; + + return true; +} + +static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev, + struct mlx5_core_dev *master_mdev) +{ + if (ipsec_fs_create_alias_supported_one(mdev) && + ipsec_fs_create_alias_supported_one(master_mdev)) + return true; + + return false; +} + +static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner, + struct mlx5_core_dev *ibv_allowed, + struct mlx5_flow_table *ft, + u32 *obj_id, char *alias_key, bool from_event) +{ + u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; + u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id); + struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {}; + struct mlx5_cmd_alias_obj_create_attr alias_attr = {}; + int ret; + int i; + + if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed)) + return -EOPNOTSUPP; + + for (i = 0; i < ACCESS_KEY_LEN; i++) + if (!from_event) + alias_key[i] = get_random_u64() & 0xFF; + + memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN); + allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + allow_attr.obj_id = aliased_object_id; + + if (!from_event) { + ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, 
&allow_attr); + if (ret) { + mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n", + ret); + return ret; + } + } + + memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN); + alias_attr.obj_id = aliased_object_id; + alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + alias_attr.vhca_id = vhca_id_to_be_accessed; + ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id); + if (ret) { + mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n", + ret); + return ret; + } + + return 0; +} + static int ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, struct mlx5_flow_destination *default_dst, struct mlx5_ipsec_rx_roce *roce) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_destination dst = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; @@ -61,14 +146,19 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; - dst.ft = roce->ft_rdma; + if (is_mpv_slave) { + dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dst.ft = roce->goto_alias_ft; + } else { + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + } rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n", err); - goto fail_add_rule; + goto out; } roce->rule = rule; @@ -84,12 +174,30 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, roce->roce_miss.rule = rule; + if (!is_mpv_slave) + goto out; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst, + 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n", + err); + goto fail_add_nic_master_rule; + } + roce->nic_master_rule = rule; + kvfree(spec); return 0; +fail_add_nic_master_rule: + mlx5_del_flow_rules(roce->roce_miss.rule); fail_add_default_rule: mlx5_del_flow_rules(roce->rule); -fail_add_rule: +out: kvfree(spec); return err; } @@ -120,25 +228,373 @@ out: return err; } -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce) +static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft) { + struct mlx5_flow_destination dst = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port, + MLX5_CAP_GEN(mdev, native_port_num)); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->goto_alias_ft; + rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n", + err); + goto out; + } + roce->rule = rule; + + /* No need for miss rule, since on miss we go to next PRIO, in which + * if master is configured, he will catch the traffic to go to 
his + * encryption table. + */ + +out: + kvfree(spec); + return err; +} + +#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0 +#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */ + +static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + struct mlx5e_priv *peer_priv, + bool from_event) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + int err; + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC); + if (!roce_ns) + return -EOPNOTSUPP; + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); + if (!nic_ns) + return -EOPNOTSUPP; + + err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key, + from_event); + if (err) + return err; + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.next_ft = &next_ft; + ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err); + goto destroy_alias; + } + + roce->goto_alias_ft = ft; + + memset(&ft_attr, 0, sizeof(ft_attr)); + ft_attr.max_fte = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err); + goto destroy_alias_ft; + } + + roce->ft = ft; + + return 0; + +destroy_alias_ft: + mlx5_destroy_flow_table(roce->goto_alias_ft); +destroy_alias: + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + u32 *in) +{ + struct mlx5_flow_group *g; + int ix = 0; + int err; + u8 *mc; + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err); + return err; + } + roce->g = g; + + err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err); + goto destroy_group; + } + + return 0; + +destroy_group: + mlx5_destroy_flow_group(roce->g); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_table *pol_ft, + u32 *in, bool from_event) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_tx_roce *roce; + struct mlx5e_priv *peer_priv; + int err; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce = &ipsec_roce->tx; + + err = 
ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err); + goto release_peer; + } + + err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err); + goto destroy_tables; + } + + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_tables: + mlx5_destroy_flow_table(roce->ft); + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + +static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce) +{ + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_destroy_flow_group(roce->nic_master_group); + mlx5_destroy_flow_table(roce->nic_master_ft); +} + +#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3 +#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2 + +static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_namespace *ns, + u32 family, u32 level, u32 prio) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_rx_roce *roce; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *g; + struct mlx5e_priv *peer_priv; + int ix = 0; + u32 *in; + int err; + + roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : + &ipsec_roce->ipv6_rx; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC); + if (!roce_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); + if (!nic_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto release_peer; + } + + ft_attr.level = (family == AF_INET) ? 
MLX5_IPSEC_RX_IPV4_FT_LEVEL : + MLX5_IPSEC_RX_IPV6_FT_LEVEL; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err); + goto free_in; + } + + roce->ft_rdma = ft; + + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = level + 2; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err); + goto destroy_ft_rdma; + } + roce->nic_master_ft = ft; + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += 1; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->nic_master_ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err); + goto destroy_nic_master_ft; + } + roce->nic_master_group = g; + + err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft, + &roce->alias_id, roce->key, false); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err); + goto destroy_group; + } + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = roce->ft->level + 1; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.next_ft = &next_ft; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err); + goto destroy_alias; + } + roce->goto_alias_ft = ft; + + kvfree(in); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_alias: + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +destroy_group: + mlx5_destroy_flow_group(roce->nic_master_group); +destroy_nic_master_ft: + mlx5_destroy_flow_table(roce->nic_master_ft); +destroy_ft_rdma: + mlx5_destroy_flow_table(roce->ft_rdma); +free_in: + kvfree(in); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; struct mlx5_ipsec_tx_roce *tx_roce; + struct mlx5e_priv *peer_priv; if (!ipsec_roce) return; tx_roce = &ipsec_roce->tx; + if (!tx_roce->ft) + return; /* Incase RoCE was cleaned from MPV event flow */ + mlx5_del_flow_rules(tx_roce->rule); mlx5_destroy_flow_group(tx_roce->g); mlx5_destroy_flow_table(tx_roce->ft); -} -#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) + if (!mlx5_core_is_mp_slave(mdev)) + return; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return; + } + + mlx5_destroy_flow_table(tx_roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + tx_roce->ft = NULL; +} int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft) + struct mlx5_flow_table *pol_ft, + bool from_event) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_tx_roce *roce; @@ -157,7 +613,14 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, if (!in) return -ENOMEM; + if (mlx5_core_is_mp_slave(mdev)) { + err 
= ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event); + goto free_in; + } + ft_attr.max_fte = 1; + ft_attr.prio = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; ft = mlx5_create_flow_table(roce->ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); @@ -209,8 +672,10 @@ struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_ro return rx_roce->ft; } -void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) +void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family, + struct mlx5_core_dev *mdev) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_ipsec_rx_roce *rx_roce; if (!ipsec_roce) @@ -218,23 +683,29 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : &ipsec_roce->ipv6_rx; + if (!rx_roce->ft) + return; /* Incase RoCE was cleaned from MPV event flow */ + if (is_mpv_slave) + mlx5_del_flow_rules(rx_roce->nic_master_rule); mlx5_del_flow_rules(rx_roce->roce_miss.rule); mlx5_del_flow_rules(rx_roce->rule); + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, rx_roce); mlx5_destroy_flow_table(rx_roce->ft_rdma); mlx5_destroy_flow_group(rx_roce->roce_miss.group); mlx5_destroy_flow_group(rx_roce->g); mlx5_destroy_flow_table(rx_roce->ft); + rx_roce->ft = NULL; } -#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) - int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_rx_roce *roce; struct mlx5_flow_table *ft; @@ -298,18 +769,28 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, } roce->roce_miss.group = g; - memset(&ft_attr, 0, sizeof(ft_attr)); - if (family == AF_INET) - ft_attr.level = 1; - ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); - goto fail_rdma_table; + if (is_mpv_slave) { + err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err); + goto fail_mpv_create; + } + } else { + memset(&ft_attr, 0, sizeof(ft_attr)); + if (family == AF_INET) + ft_attr.level = 1; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); + goto fail_rdma_table; + } + + roce->ft_rdma = ft; } - roce->ft_rdma = ft; - err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce); if (err) { mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err); @@ -320,7 +801,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, return 0; fail_setup_rule: + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, roce); mlx5_destroy_flow_table(roce->ft_rdma); +fail_mpv_create: fail_rdma_table: mlx5_destroy_flow_group(roce->roce_miss.group); fail_mgroup: @@ -332,12 +816,24 @@ fail_nomem: return err; } +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev) +{ + if (!mlx5_core_mp_enabled(mdev)) + return true; + + if (ipsec_fs_create_alias_supported_one(mdev)) + return true; + + return false; +} + void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce) { kfree(ipsec_roce); 
} -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom) { struct mlx5_ipsec_fs *roce_ipsec; struct mlx5_flow_namespace *ns; @@ -363,6 +859,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) roce_ipsec->tx.ns = ns; + roce_ipsec->devcom = devcom; + return roce_ipsec; err_tx: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index 9712d705fe..2a1af78309 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -4,22 +4,28 @@ #ifndef __MLX5_LIB_IPSEC_H__ #define __MLX5_LIB_IPSEC_H__ +#include "lib/devcom.h" + struct mlx5_ipsec_fs; struct mlx5_flow_table * mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family); void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, - u32 family); + u32 family, struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio); -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce); +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft); + struct mlx5_flow_table *pol_ft, + bool from_event); void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce); -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev); +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom); +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev); #endif /* __MLX5_LIB_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6ca91c0e8a..a17152c1cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -73,6 +73,7 @@ #include "sf/sf.h" #include "mlx5_irq.h" #include "hwmon.h" +#include "lag/lag.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -952,6 +953,27 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev) mlx5_pci_disable_device(dev); } +static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev) +{ + /* This component is use to sync adding core_dev to lag_dev and to sync + * changes of mlx5_adev_devices between LAG layer and other layers. 
+ */ + if (!mlx5_lag_is_supported(dev)) + return; + + dev->priv.hca_devcom_comp = + mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS, + mlx5_query_nic_system_image_guid(dev), + NULL, dev); + if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp)) + mlx5_core_err(dev, "Failed to register devcom HCA component\n"); +} + +static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev) +{ + mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp); +} + static int mlx5_init_once(struct mlx5_core_dev *dev) { int err; @@ -960,6 +982,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) if (IS_ERR(dev->priv.devc)) mlx5_core_warn(dev, "failed to register devcom device %ld\n", PTR_ERR(dev->priv.devc)); + mlx5_register_hca_devcom_comp(dev); err = mlx5_query_board_id(dev); if (err) { @@ -1094,6 +1117,7 @@ err_eq_cleanup: err_irq_cleanup: mlx5_irq_table_cleanup(dev); err_devcom: + mlx5_unregister_hca_devcom_comp(dev); mlx5_devcom_unregister_device(dev->priv.devc); return err; @@ -1123,6 +1147,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_events_cleanup(dev); mlx5_eq_table_cleanup(dev); mlx5_irq_table_cleanup(dev); + mlx5_unregister_hca_devcom_comp(dev); mlx5_devcom_unregister_device(dev->priv.devc); } @@ -1411,9 +1436,9 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { + mlx5_eswitch_disable(dev->priv.eswitch); mlx5_devlink_traps_unregister(priv_to_devlink(dev)); mlx5_sf_dev_table_destroy(dev); - mlx5_eswitch_disable(dev->priv.eswitch); mlx5_sriov_detach(dev); mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 124352459c..6b14e347d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -41,6 +41,7 @@ #include #include #include +#include "lib/devcom.h" extern uint mlx5_core_debug_mask; @@ -97,6 +98,22 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define ACCESS_KEY_LEN 32 +#define FT_ID_FT_TYPE_OFFSET 24 + +struct mlx5_cmd_allow_other_vhca_access_attr { + u16 obj_type; + u32 obj_id; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5_cmd_alias_obj_create_attr { + u32 obj_id; + u16 vhca_id; + u16 obj_type; + u8 access_key[ACCESS_KEY_LEN]; +}; + static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) 
{ struct device *device = dev->device; @@ -143,6 +160,8 @@ enum mlx5_semaphore_space_address { #define MLX5_DEFAULT_PROF 2 #define MLX5_SF_PROF 3 +#define MLX5_NUM_FW_CMD_THREADS 8 +#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed, size_t item_size, size_t num_items, @@ -248,10 +267,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); -void mlx5_dev_list_lock(void); -void mlx5_dev_list_unlock(void); -int mlx5_dev_list_trylock(void); void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); @@ -290,14 +305,12 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) { int ret; - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); ret = mlx5_rescan_drivers_locked(dev); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); return ret; } -void mlx5_lag_update(struct mlx5_core_dev *dev); - enum { MLX5_NIC_IFC_FULL = 0, MLX5_NIC_IFC_DISABLED = 1, @@ -331,7 +344,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL) -void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work); static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); @@ -343,6 +355,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr); +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id); +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type); static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index be70d1f23a..7d8c732818 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -1098,10 +1098,11 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, - [MLX5E_400GAUI_8] = 400000, + [MLX5E_400GAUI_8_400GBASE_CR8] = 400000, [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, + [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000, }; int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 05e148db98..c93492b677 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -14,17 +14,22 @@ struct 
mlx5_sf_dev_table { struct xarray devices; - unsigned int max_sfs; phys_addr_t base_address; u64 sf_bar_length; struct notifier_block nb; - struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */ struct workqueue_struct *active_wq; struct work_struct work; u8 stop_active_wq:1; struct mlx5_core_dev *dev; }; +struct mlx5_sf_dev_active_work_ctx { + struct work_struct work; + struct mlx5_vhca_state_event event; + struct mlx5_sf_dev_table *table; + int sf_index; +}; + static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev) { return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev); @@ -110,12 +115,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, sf_dev->parent_mdev = dev; sf_dev->fn_id = fn_id; - if (!table->max_sfs) { - mlx5_adev_idx_free(id); - kfree(sf_dev); - err = -EOPNOTSUPP; - goto add_err; - } sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); trace_mlx5_sf_dev_add(dev, sf_dev, id); @@ -172,7 +171,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ return 0; sf_index = event->function_id - base_id; - mutex_lock(&table->table_lock); sf_dev = xa_load(&table->devices, sf_index); switch (event->new_vhca_state) { case MLX5_VHCA_STATE_INVALID: @@ -196,7 +194,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ default: break; } - mutex_unlock(&table->table_lock); return 0; } @@ -221,15 +218,44 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) return 0; } -static void mlx5_sf_dev_add_active_work(struct work_struct *work) +static void mlx5_sf_dev_add_active_work(struct work_struct *_work) { - struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work); + struct mlx5_sf_dev_active_work_ctx *work_ctx; + + work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work); + if (work_ctx->table->stop_active_wq) + goto out; + /* Don't probe device which is already probe */ + if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index)) + mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index, + work_ctx->event.function_id, work_ctx->event.sw_function_id); + /* There is a race where SF got inactive after the query + * above. e.g.: the query returns that the state of the + * SF is active, and after that the eswitch manager set it to + * inactive. + * This case cannot be managed in SW, since the probing of the + * SF is on one system, and the inactivation is on a different + * system. + * If the inactive is done after the SF perform init_hca(), + * the SF will fully probe and then removed. If it was + * done before init_hca(), the SF probe will fail. 
+ */ +out: + kfree(work_ctx); +} + +/* In case SFs are generated externally, probe active SFs */ +static void mlx5_sf_dev_queue_active_works(struct work_struct *_work) +{ + struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work); u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + struct mlx5_sf_dev_active_work_ctx *work_ctx; struct mlx5_core_dev *dev = table->dev; u16 max_functions; u16 function_id; u16 sw_func_id; int err = 0; + int wq_idx; u8 state; int i; @@ -249,27 +275,22 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work) continue; sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id); - mutex_lock(&table->table_lock); - /* Don't probe device which is already probe */ - if (!xa_load(&table->devices, i)) - mlx5_sf_dev_add(dev, i, function_id, sw_func_id); - /* There is a race where SF got inactive after the query - * above. e.g.: the query returns that the state of the - * SF is active, and after that the eswitch manager set it to - * inactive. - * This case cannot be managed in SW, since the probing of the - * SF is on one system, and the inactivation is on a different - * system. - * If the inactive is done after the SF perform init_hca(), - * the SF will fully probe and then removed. If it was - * done before init_hca(), the SF probe will fail. - */ - mutex_unlock(&table->table_lock); + work_ctx = kzalloc(sizeof(*work_ctx), GFP_KERNEL); + if (!work_ctx) + return; + + INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work); + work_ctx->event.function_id = function_id; + work_ctx->event.sw_function_id = sw_func_id; + work_ctx->table = table; + work_ctx->sf_index = i; + wq_idx = work_ctx->event.function_id % MLX5_DEV_MAX_WQS; + mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work); } } /* In case SFs are generated externally, probe active SFs */ -static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) +static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table) { if (MLX5_CAP_GEN(table->dev, eswitch_manager)) return 0; /* the table is local */ @@ -280,12 +301,12 @@ static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) table->active_wq = create_singlethread_workqueue("mlx5_active_sf"); if (!table->active_wq) return -ENOMEM; - INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work); + INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works); queue_work(table->active_wq, &table->work); return 0; } -static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) +static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table) { if (table->active_wq) { table->stop_active_wq = true; @@ -296,7 +317,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table; - unsigned int max_sfs; int err; if (!mlx5_sf_dev_supported(dev)) @@ -310,37 +330,30 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) table->nb.notifier_call = mlx5_sf_dev_state_change_handler; table->dev = dev; - if (MLX5_CAP_GEN(dev, max_num_sf)) - max_sfs = MLX5_CAP_GEN(dev, max_num_sf); - else - max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf); table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); table->base_address = pci_resource_start(dev->pdev, 2); - table->max_sfs = max_sfs; xa_init(&table->devices); - mutex_init(&table->table_lock); dev->priv.sf_dev_table = table; err = mlx5_vhca_event_notifier_register(dev, &table->nb); if (err) 
goto vhca_err; - err = mlx5_sf_dev_queue_active_work(table); + err = mlx5_sf_dev_create_active_works(table); if (err) goto add_active_err; err = mlx5_sf_dev_vhca_arm_all(table); if (err) goto arm_err; - mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs); return; arm_err: - mlx5_sf_dev_destroy_active_work(table); + mlx5_sf_dev_destroy_active_works(table); add_active_err: mlx5_vhca_event_notifier_unregister(dev, &table->nb); + mlx5_vhca_event_work_queues_flush(dev); vhca_err: - table->max_sfs = 0; kfree(table); dev->priv.sf_dev_table = NULL; table_err: @@ -365,9 +378,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) if (!table) return; - mlx5_sf_dev_destroy_active_work(table); + mlx5_sf_dev_destroy_active_works(table); mlx5_vhca_event_notifier_unregister(dev, &table->nb); - mutex_destroy(&table->table_lock); + mlx5_vhca_event_work_queues_flush(dev); /* Now that event handler is not running, it is safe to destroy * the sf device without race. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index 2a66a427ef..b99131e95e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -19,6 +19,12 @@ struct mlx5_sf_dev { u16 fn_id; }; +struct mlx5_sf_peer_devlink_event_ctx { + u16 fn_id; + struct devlink *devlink; + int err; +}; + void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev); void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 8fe82f1191..169c2c68ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -8,6 +8,20 @@ #include "dev.h" #include "devlink.h" +static int mlx5_core_peer_devlink_set(struct mlx5_sf_dev *sf_dev, struct devlink *devlink) +{ + struct mlx5_sf_peer_devlink_event_ctx event_ctx = { + .fn_id = sf_dev->fn_id, + .devlink = devlink, + }; + int ret; + + ret = mlx5_blocking_notifier_call_chain(sf_dev->parent_mdev, + MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, + &event_ctx); + return ret == NOTIFY_OK ? 
event_ctx.err : 0; +} + static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); @@ -54,9 +68,21 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; } + + err = mlx5_core_peer_devlink_set(sf_dev, devlink); + if (err) { + mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); + goto peer_devlink_set_err; + } + devlink_register(devlink); return 0; +peer_devlink_set_err: + if (mlx5_dev_is_lightweight(sf_dev->mdev)) + mlx5_uninit_one_light(sf_dev->mdev); + else + mlx5_uninit_one(sf_dev->mdev); init_one_err: iounmap(mdev->iseg); remap_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index e34a8f88c5..6c11e075ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -20,43 +20,36 @@ struct mlx5_sf { u16 hw_state; }; +static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port) +{ + struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port); + + return container_of(mlx5_dl_port, struct mlx5_sf, dl_port); +} + struct mlx5_sf_table { struct mlx5_core_dev *dev; /* To refer from notifier context. */ - struct xarray port_indices; /* port index based lookup. */ - refcount_t refcount; - struct completion disable_complete; + struct xarray function_ids; /* function id based lookup. */ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */ struct notifier_block esw_nb; struct notifier_block vhca_nb; + struct notifier_block mdev_nb; }; -static struct mlx5_sf * -mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index) -{ - return xa_load(&table->port_indices, port_index); -} - static struct mlx5_sf * mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) { - unsigned long index; - struct mlx5_sf *sf; - - xa_for_each(&table->port_indices, index, sf) { - if (sf->hw_fn_id == fn_id) - return sf; - } - return NULL; + return xa_load(&table->function_ids, fn_id); } -static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) +static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL); + return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL); } -static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) +static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - xa_erase(&table->port_indices, sf->port_index); + xa_erase(&table->function_ids, sf->hw_fn_id); } static struct mlx5_sf * @@ -93,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; sf->controller = controller; - err = mlx5_sf_id_insert(table, sf); + err = mlx5_sf_function_id_insert(table, sf); if (err) goto insert_err; @@ -111,28 +104,11 @@ id_err: static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - mlx5_sf_id_erase(table, sf); mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id); trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id); kfree(sf); } -static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev) -{ - struct mlx5_sf_table *table = dev->priv.sf_table; - 
- if (!table) - return NULL; - - return refcount_inc_not_zero(&table->refcount) ? table : NULL; -} - -static void mlx5_sf_table_put(struct mlx5_sf_table *table) -{ - if (refcount_dec_and_test(&table->refcount)) - complete(&table->disable_complete); -} - static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state) { switch (hw_state) { @@ -172,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err = 0; - - table = mlx5_sf_table_try_get(dev); - if (!table) - return -EOPNOTSUPP; + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -EOPNOTSUPP; - goto sf_err; - } mutex_lock(&table->sf_state_lock); *state = mlx5_sf_to_devlink_state(sf->hw_state); *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state); mutex_unlock(&table->sf_state_lock); -sf_err: - mlx5_sf_table_put(table); - return err; + return 0; } static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, @@ -257,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err; - - table = mlx5_sf_table_try_get(dev); - if (!table) { - NL_SET_ERR_MSG_MOD(extack, - "Port state set is only supported in eswitch switchdev mode or SF ports are disabled."); - return -EOPNOTSUPP; - } - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -ENODEV; - goto out; - } + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - err = mlx5_sf_state_set(dev, table, sf, state, extack); -out: - mlx5_sf_table_put(table); - return err; + return mlx5_sf_state_set(dev, table, sf, state, extack); } static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, @@ -335,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ return 0; } +static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +{ + return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && + mlx5_sf_hw_table_supported(dev); +} + int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *new_attr, struct netlink_ext_ack *extack, struct devlink_port **dl_port) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_sf_table *table; + struct mlx5_sf_table *table = dev->priv.sf_table; int err; err = mlx5_sf_new_check_attr(dev, new_attr, extack); if (err) return err; - table = mlx5_sf_table_try_get(dev); - if (!table) { + if (!mlx5_sf_table_supported(dev)) { + NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported."); + return -EOPNOTSUPP; + } + + if (!is_mdev_switchdev_mode(dev)) { NL_SET_ERR_MSG_MOD(extack, - "Port add is only supported in eswitch switchdev mode or SF ports are disabled."); + "SF ports are only supported in eswitch switchdev mode."); return -EOPNOTSUPP; } - err = mlx5_sf_add(dev, table, new_attr, extack, dl_port); - mlx5_sf_table_put(table); - return err; + + return mlx5_sf_add(dev, table, new_attr, extack, dl_port); } static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) { + mutex_lock(&table->sf_state_lock); + + mlx5_sf_function_id_erase(table, sf); + if (sf->hw_state == 
MLX5_VHCA_STATE_ALLOCATED) { mlx5_sf_free(table, sf); } else if (mlx5_sf_is_active(sf)) { @@ -376,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id); kfree(sf); } + + mutex_unlock(&table->sf_state_lock); +} + +static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + struct mlx5_eswitch *esw = table->dev->priv.eswitch; + + mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); + mlx5_sf_dealloc(table, sf); } int mlx5_devlink_sf_port_del(struct devlink *devlink, @@ -383,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err = 0; - - table = mlx5_sf_table_try_get(dev); - if (!table) { - NL_SET_ERR_MSG_MOD(extack, - "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); - return -EOPNOTSUPP; - } - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -ENODEV; - goto sf_err; - } - - mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); - mlx5_sf_id_erase(table, sf); + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - mutex_lock(&table->sf_state_lock); - mlx5_sf_dealloc(table, sf); - mutex_unlock(&table->sf_state_lock); -sf_err: - mlx5_sf_table_put(table); - return err; + mlx5_sf_del(table, sf); + return 0; } static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) @@ -433,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v bool update = false; struct mlx5_sf *sf; - table = mlx5_sf_table_try_get(table->dev); - if (!table) - return 0; - mutex_lock(&table->sf_state_lock); sf = mlx5_sf_lookup_by_function_id(table, event->function_id); if (!sf) - goto sf_err; + goto unlock; /* When driver is attached or detached to a function, an event * notifies such state change. @@ -450,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v sf->hw_state = event->new_vhca_state; trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller, sf->hw_fn_id, sf->hw_state); -sf_err: +unlock: mutex_unlock(&table->sf_state_lock); - mlx5_sf_table_put(table); return 0; } -static void mlx5_sf_table_enable(struct mlx5_sf_table *table) -{ - init_completion(&table->disable_complete); - refcount_set(&table->refcount, 1); -} - -static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) +static void mlx5_sf_del_all(struct mlx5_sf_table *table) { - struct mlx5_eswitch *esw = table->dev->priv.eswitch; unsigned long index; struct mlx5_sf *sf; - /* At this point, no new user commands can start and no vhca event can - * arrive. It is safe to destroy all user created SFs. - */ - xa_for_each(&table->port_indices, index, sf) { - mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); - mlx5_sf_id_erase(table, sf); - mlx5_sf_dealloc(table, sf); - } -} - -static void mlx5_sf_table_disable(struct mlx5_sf_table *table) -{ - if (!refcount_read(&table->refcount)) - return; - - /* Balances with refcount_set; drop the reference so that new user cmd cannot start - * and new vhca event handler cannot run. 
- */ - mlx5_sf_table_put(table); - wait_for_completion(&table->disable_complete); - - mlx5_sf_deactivate_all(table); + xa_for_each(&table->function_ids, index, sf) + mlx5_sf_del(table, sf); } static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data) @@ -498,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi const struct mlx5_esw_event_info *mode = data; switch (mode->new_mode) { - case MLX5_ESWITCH_OFFLOADS: - mlx5_sf_table_enable(table); - break; case MLX5_ESWITCH_LEGACY: - mlx5_sf_table_disable(table); + mlx5_sf_del_all(table); break; default: break; @@ -511,10 +426,29 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi return 0; } -static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data) { - return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && - mlx5_sf_hw_table_supported(dev); + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb); + struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data; + int ret = NOTIFY_DONE; + struct mlx5_sf *sf; + + if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK) + return NOTIFY_DONE; + + + mutex_lock(&table->sf_state_lock); + sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id); + if (!sf) + goto out; + + event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port, + event_ctx->devlink); + + ret = NOTIFY_OK; +out: + mutex_unlock(&table->sf_state_lock); + return ret; } int mlx5_sf_table_init(struct mlx5_core_dev *dev) @@ -531,9 +465,8 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev) mutex_init(&table->sf_state_lock); table->dev = dev; - xa_init(&table->port_indices); + xa_init(&table->function_ids); dev->priv.sf_table = table; - refcount_set(&table->refcount, 0); table->esw_nb.notifier_call = mlx5_sf_esw_event; err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb); if (err) @@ -544,6 +477,9 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev) if (err) goto vhca_err; + table->mdev_nb.notifier_call = mlx5_sf_mdev_event; + mlx5_blocking_notifier_register(dev, &table->mdev_nb); + return 0; vhca_err: @@ -562,10 +498,10 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) if (!table) return; + mlx5_blocking_notifier_unregister(dev, &table->mdev_nb); mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); - WARN_ON(refcount_read(&table->refcount)); mutex_destroy(&table->sf_state_lock); - WARN_ON(!xa_empty(&table->port_indices)); + WARN_ON(!xa_empty(&table->function_ids)); kfree(table); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index d908fba968..cda01ba441 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -21,6 +21,15 @@ struct mlx5_vhca_event_work { struct mlx5_vhca_state_event event; }; +struct mlx5_vhca_event_handler { + struct workqueue_struct *wq; +}; + +struct mlx5_vhca_events { + struct mlx5_core_dev *dev; + struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS]; +}; + int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) { u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; @@ -99,6 +108,11 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work) kfree(work); } +void 
mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work) +{ + queue_work(dev->priv.vhca_events->handler[idx].wq, work); +} + static int mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data) { @@ -106,6 +120,7 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb); struct mlx5_vhca_event_work *work; struct mlx5_eqe *eqe = data; + int wq_idx; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) @@ -113,7 +128,8 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); work->notifier = notifier; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); - mlx5_events_work_enqueue(notifier->dev, &work->work); + wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS; + mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work); return NOTIFY_OK; } @@ -132,28 +148,75 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) int mlx5_vhca_event_init(struct mlx5_core_dev *dev) { struct mlx5_vhca_state_notifier *notifier; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct mlx5_vhca_events *events; + int err, i; if (!mlx5_vhca_event_supported(dev)) return 0; - notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); - if (!notifier) + events = kzalloc(sizeof(*events), GFP_KERNEL); + if (!events) return -ENOMEM; + events->dev = dev; + dev->priv.vhca_events = events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) { + snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i); + events->handler[i].wq = create_singlethread_workqueue(wq_name); + if (!events->handler[i].wq) { + err = -ENOMEM; + goto err_create_wq; + } + } + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) { + err = -ENOMEM; + goto err_notifier; + } + dev->priv.vhca_state_notifier = notifier; notifier->dev = dev; BLOCKING_INIT_NOTIFIER_HEAD(¬ifier->n_head); MLX5_NB_INIT(¬ifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE); return 0; + +err_notifier: +err_create_wq: + for (--i; i >= 0; i--) + destroy_workqueue(events->handler[i].wq); + kfree(events); + return err; +} + +void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_vhca_events *vhca_events; + int i; + + if (!mlx5_vhca_event_supported(dev)) + return; + + vhca_events = dev->priv.vhca_events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) + flush_workqueue(vhca_events->handler[i].wq); } void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) { + struct mlx5_vhca_events *vhca_events; + int i; + if (!mlx5_vhca_event_supported(dev)) return; kfree(dev->priv.vhca_state_notifier); dev->priv.vhca_state_notifier = NULL; + vhca_events = dev->priv.vhca_events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) + destroy_workqueue(vhca_events->handler[i].wq); + kvfree(vhca_events); } void mlx5_vhca_event_start(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 013cdfe906..1725ba64f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -28,6 +28,9 @@ int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen); +void 
mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work); +void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev); + #else static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 90c38cbbde..d2b65a0ce4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -55,6 +55,13 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id) return action_type_to_str[action_id]; } +static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev) +{ + return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX || + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) || + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table)); +} + static const enum dr_action_valid_state next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { [DR_ACTION_DOMAIN_NIC_INGRESS] = { @@ -1170,12 +1177,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, bool ignore_flow_level, u32 flow_source) { + struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest; struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; struct mlx5dr_action *action; bool reformat_req = false; + bool is_ft_wire = false; + u16 num_dst_ft = 0; u32 num_of_ref = 0; u32 ref_act_cnt; + u16 last_dest; int ret; int i; @@ -1217,11 +1228,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, break; case DR_ACTION_TYP_FT: + if (num_dst_ft && + !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) { + mlx5dr_dbg(dmn, "multiple FT destinations not supported\n"); + goto free_ref_actions; + } + num_dst_ft++; hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - if (dest_action->dest_tbl->is_fw_tbl) + if (dest_action->dest_tbl->is_fw_tbl) { hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id; - else + } else { hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id; + if (dest_action->dest_tbl->is_wire_ft) { + is_ft_wire = true; + last_dest = i; + } + } break; default: @@ -1230,6 +1252,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, } } + /* In multidest, the FW does the iterator in the RX except of the last + * one that done in the TX. + * So, if one of the ft target is wire, put it at the end of the dest list. 
+ */ + if (is_ft_wire && num_dst_ft > 1) { + tmp_hw_dest = hw_dests[last_dest]; + hw_dests[last_dest] = hw_dests[num_of_dests - 1]; + hw_dests[num_of_dests - 1] = tmp_hw_dest; + } + action = dr_action_create_generic(DR_ACTION_TYP_FT); if (!action) goto free_ref_actions; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 6c59de3e28..81eff6c410 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -436,10 +436,6 @@ void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, - struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -1064,6 +1060,7 @@ struct mlx5dr_action_sampler { struct mlx5dr_action_dest_tbl { u8 is_fw_tbl:1; + u8 is_wire_ft:1; union { struct mlx5dr_table *tbl; struct { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 14f6df88b1..50c2554c9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain, struct mlx5_flow_rule *dst) { struct mlx5_flow_table *dest_ft = dst->dest_attr.ft; + struct mlx5dr_action *tbl_action; if (mlx5dr_is_fw_table(dest_ft)) return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft); - return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); + + tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); + if (tbl_action) + tbl_action->dest_tbl->is_wire_ft = + dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 
1 : 0; + + return tbl_action; } static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain, diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index aaf1faed41..3d09fa5459 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -471,7 +471,7 @@ out: return err; } -static int mlxbf_gige_remove(struct platform_device *pdev) +static void mlxbf_gige_remove(struct platform_device *pdev) { struct mlxbf_gige *priv = platform_get_drvdata(pdev); @@ -479,8 +479,6 @@ static int mlxbf_gige_remove(struct platform_device *pdev) phy_disconnect(priv->netdev->phydev); mlxbf_gige_mdio_remove(priv); platform_set_drvdata(pdev, NULL); - - return 0; } static void mlxbf_gige_shutdown(struct platform_device *pdev) @@ -499,7 +497,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match); static struct platform_driver mlxbf_gige_driver = { .probe = mlxbf_gige_probe, - .remove = mlxbf_gige_remove, + .remove_new = mlxbf_gige_remove, .shutdown = mlxbf_gige_shutdown, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 09bef04b11..e827c78be1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -276,6 +276,12 @@ MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8); */ MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8); +/* cmd_mbox_query_fw_lag_mode_support + * 0: CONFIG_PROFILE.lag_mode is not supported by FW + * 1: CONFIG_PROFILE.lag_mode is supported by FW + */ +MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1); + /* cmd_mbox_query_fw_clr_int_base_offset * Clear Interrupt register's offset from clr_int_bar register * in PCI address space. @@ -659,42 +665,48 @@ MLXSW_ITEM32(cmd_mbox, config_profile, */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); -/* cmd_mbox_config_set_ubridge +/* cmd_mbox_config_profile_set_ubridge * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1); -/* cmd_mbox_config_set_kvd_linear_size +/* cmd_mbox_config_profile_set_kvd_linear_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1); -/* cmd_mbox_config_set_kvd_hash_single_size +/* cmd_mbox_config_profile_set_kvd_hash_single_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); -/* cmd_mbox_config_set_kvd_hash_double_size +/* cmd_mbox_config_profile_set_kvd_hash_double_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); -/* cmd_mbox_config_set_cqe_version +/* cmd_mbox_config_profile_set_cqe_version * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1); -/* cmd_mbox_config_set_cqe_time_stamp_type +/* cmd_mbox_config_profile_set_cqe_time_stamp_type * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. 
*/ MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1); +/* cmd_mbox_config_profile_set_lag_mode + * Capability bit. Setting a bit to 1 configures the lag_mode + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_lag_mode, 0x08, 7, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -840,6 +852,21 @@ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1); +enum mlxsw_cmd_mbox_config_profile_lag_mode { + /* FW manages PGT LAG table */ + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW, + /* SW manages PGT LAG table */ + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW, +}; + +/* cmd_mbox_config_profile_lag_mode + * LAG mode + * Configured if set_lag_mode is set + * Supported from Spectrum-2 and above. + * Supported only when ubridge = 1 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, lag_mode, 0x50, 3, 1); + /* cmd_mbox_config_kvd_linear_size * KVD Linear Size * Valid for Spectrum only @@ -847,7 +874,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); -/* cmd_mbox_config_kvd_hash_single_size +/* cmd_mbox_config_profile_kvd_hash_single_size * KVD Hash single-entries size * Valid for Spectrum only * Allowed values are 128*N where N=0 or higher @@ -856,7 +883,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); */ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24); -/* cmd_mbox_config_kvd_hash_double_size +/* cmd_mbox_config_profile_kvd_hash_double_size * KVD Hash double-entries size (units of single-size entries) * Valid for Spectrum only * Allowed values are 128*N where N=0 or higher diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 1ccf3b73ed..f23421f038 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -204,6 +204,13 @@ int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag) } EXPORT_SYMBOL(mlxsw_core_max_lag); +enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv); +} +EXPORT_SYMBOL(mlxsw_core_lag_mode); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) { return mlxsw_core->driver_priv; @@ -1792,122 +1799,78 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg, static const struct mlxsw_listener mlxsw_core_health_listener = MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE); -static int +static void mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val, tile_v; - int err; val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl); if (tile_v) { val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); } - - return 0; } -static int +static void mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val, tile_v; - int err; val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl); - err = 
devlink_fmsg_u32_pair_put(fmsg, "var0", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var0", val); val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var1", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var1", val); val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var2", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var2", val); val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var3", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var3", val); val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var4", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var4", val); val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "existptr", val); val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "callra", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "callra", val); val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl); if (tile_v) { val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); } val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); - if (err) - return err; - - return 0; + devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); } -static int +static void mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val; - int err; val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl); - return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); + devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); } -static int +static void mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val; - int err; val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "log_address", val); val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl); - err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); - if (err) - return err; - - return 0; + devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); } static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter, @@ -1918,24 +1881,17 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor char *val_str; u8 event_id; u32 val; - int err; if (!priv_ctx) /* User-triggered dumps 
are not possible */ return -EOPNOTSUPP; val = mlxsw_reg_mfde_irisc_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "event"); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); + devlink_fmsg_arr_pair_nest_start(fmsg, "event"); event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "id", event_id); switch (event_id) { case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: val_str = "CR space timeout"; @@ -1955,24 +1911,13 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str); - if (err) - return err; - } - - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); - if (err) - return err; + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "desc", val_str); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); val = mlxsw_reg_mfde_severity_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "id", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "id", val); switch (val) { case MLXSW_REG_MFDE_SEVERITY_FATL: val_str = "Fatal"; @@ -1986,15 +1931,9 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str); - if (err) - return err; - } - - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "desc", val_str); + devlink_fmsg_arr_pair_nest_end(fmsg); val = mlxsw_reg_mfde_method_get(mfde_pl); switch (val) { @@ -2007,16 +1946,11 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "method", val_str); - if (err) - return err; - } + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "method", val_str); val = mlxsw_reg_mfde_long_process_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "long_process", val); val = mlxsw_reg_mfde_command_type_get(mfde_pl); switch (val) { @@ -2032,29 +1966,25 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); - if (err) - return err; - } + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); switch (event_id) { case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: - return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg); + break; case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: - return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg); + break; case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: - return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); + mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); + break; case 
MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: - return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg); + break; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index e5474d3e34..764d14bd5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -36,6 +36,8 @@ struct mlxsw_fw_rev; unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag); +enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core); void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); @@ -335,6 +337,7 @@ struct mlxsw_config_profile { u8 kvd_hash_single_parts; u8 kvd_hash_double_parts; u8 cqe_time_stamp_type; + bool lag_mode_prefer_sw; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -485,6 +488,7 @@ struct mlxsw_bus { u32 (*read_frc_l)(void *bus_priv); u32 (*read_utc_sec)(void *bus_priv); u32 (*read_utc_nsec)(void *bus_priv); + enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv); u8 features; }; @@ -624,7 +628,7 @@ struct mlxsw_linecards { struct mlxsw_linecard_types_info *types_info; struct list_head event_ops_list; struct mutex event_ops_list_lock; /* Locks accesses to event ops list */ - struct mlxsw_linecard linecards[]; + struct mlxsw_linecard linecards[] __counted_by(count); }; static inline struct mlxsw_linecard * diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 70f9b5e85a..0d5e6f9b46 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -32,8 +32,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER, 0x18, 17, 12), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), @@ -44,6 +43,9 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1), MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_3, 0x40, 17, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_4_7, 0x40, 21, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x40, 25, 4), }; struct mlxsw_afk { @@ -136,6 +138,7 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_picker { DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + DECLARE_BITMAP(chosen_element, MLXSW_AFK_ELEMENT_MAX); unsigned int total; }; @@ -206,7 +209,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, if (key_info->blocks_count == mlxsw_afk->max_blocks) return -EINVAL; - for_each_set_bit(element, picker[block_index].element, + for_each_set_bit(element, picker[block_index].chosen_element, MLXSW_AFK_ELEMENT_MAX) { key_info->element_to_block[element] = key_info->blocks_count; mlxsw_afk_element_usage_add(&key_info->elusage, element); @@ -218,11 +221,43 @@ 
static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, return 0; } +static int mlxsw_afk_keys_fill(struct mlxsw_afk *mlxsw_afk, + unsigned long *chosen_blocks_bm, + struct mlxsw_afk_picker *picker, + struct mlxsw_afk_key_info *key_info) +{ + int i, err; + + /* First fill only key blocks with high_entropy. */ + for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) { + if (!mlxsw_afk->blocks[i].high_entropy) + continue; + + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i, + key_info); + if (err) + return err; + __clear_bit(i, chosen_blocks_bm); + } + + /* Fill the rest of key blocks. */ + for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) { + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i, + key_info); + if (err) + return err; + } + + return 0; +} + static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_usage *elusage) { + DECLARE_BITMAP(elusage_chosen, MLXSW_AFK_ELEMENT_MAX) = {0}; struct mlxsw_afk_picker *picker; + unsigned long *chosen_blocks_bm; enum mlxsw_afk_element element; int err; @@ -230,6 +265,12 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, if (!picker) return -ENOMEM; + chosen_blocks_bm = bitmap_zalloc(mlxsw_afk->blocks_count, GFP_KERNEL); + if (!chosen_blocks_bm) { + err = -ENOMEM; + goto err_bitmap_alloc; + } + /* Since the same elements could be present in multiple blocks, * we must find out optimal block list in order to make the * block count as low as possible. @@ -254,15 +295,26 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, err = block_index; goto out; } - err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, - block_index, key_info); - if (err) - goto out; + + __set_bit(block_index, chosen_blocks_bm); + + bitmap_copy(picker[block_index].chosen_element, + picker[block_index].element, MLXSW_AFK_ELEMENT_MAX); + + bitmap_or(elusage_chosen, elusage_chosen, + picker[block_index].chosen_element, + MLXSW_AFK_ELEMENT_MAX); + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); - } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); - err = 0; + } while (!bitmap_equal(elusage_chosen, elusage->usage, + MLXSW_AFK_ELEMENT_MAX)); + + err = mlxsw_afk_keys_fill(mlxsw_afk, chosen_blocks_bm, picker, + key_info); out: + bitmap_free(chosen_blocks_bm); +err_bitmap_alloc: kfree(picker); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 2eac7582c3..98a0559817 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -33,10 +33,12 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_IP_TTL_, MLXSW_AFK_ELEMENT_IP_ECN, MLXSW_AFK_ELEMENT_IP_DSCP, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER, MLXSW_AFK_ELEMENT_FDB_MISS, MLXSW_AFK_ELEMENT_L4_PORT_RANGE, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, MLXSW_AFK_ELEMENT_MAX, }; @@ -117,6 +119,7 @@ struct mlxsw_afk_block { u16 encoding; /* block ID */ struct mlxsw_afk_element_inst *instances; unsigned int instances_count; + bool high_entropy; }; #define MLXSW_AFK_BLOCK(_encoding, _instances) \ @@ -126,6 +129,14 @@ struct mlxsw_afk_block { .instances_count = ARRAY_SIZE(_instances), \ } +#define MLXSW_AFK_BLOCK_HIGH_ENTROPY(_encoding, _instances) \ + { \ + .encoding = _encoding, \ + .instances = 
_instances, \ + .instances_count = ARRAY_SIZE(_instances), \ + .high_entropy = true, \ + } + struct mlxsw_afk_element_usage { DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index d637c0348f..53b150b7ae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -34,7 +34,7 @@ struct mlxsw_env { u8 num_of_slots; /* Including the main board. */ u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */ struct mutex line_cards_lock; /* Protects line cards. */ - struct mlxsw_env_line_card *line_cards[]; + struct mlxsw_env_line_card *line_cards[] __counted_by(num_of_slots); }; static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, @@ -775,7 +775,7 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, int err; mlxsw_reg_mtbr_pack(mtbr_pl, slot_index, - MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 0fd290d776..9c12e1feb6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -293,7 +293,7 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index, - MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c index af37e650a8..e8d6fe35bf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c @@ -132,6 +132,7 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev, struct mlxsw_linecard *linecard = linecard_bdev->linecard; struct mlxsw_linecard_dev *linecard_dev; struct devlink *devlink; + int err; devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops, sizeof(*linecard_dev), &adev->dev); @@ -141,8 +142,12 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev, linecard_dev->linecard = linecard_bdev->linecard; linecard_bdev->linecard_dev = linecard_dev; + err = devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink); + if (err) { + devlink_free(devlink); + return err; + } devlink_register(devlink); - devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink); return 0; } @@ -151,9 +156,7 @@ static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev) struct mlxsw_linecard_bdev *linecard_bdev = container_of(adev, struct mlxsw_linecard_bdev, adev); struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev); - struct mlxsw_linecard *linecard = linecard_bdev->linecard; - devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL); devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 70d7fff24f..f1b48d6615 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -31,6 +31,7 @@ /* External cooling devices, allowed for binding to mlxsw thermal zones. */ static char * const mlxsw_thermal_external_allowed_cdev[] = { "mlxreg_fan", + "emc2305", }; struct mlxsw_cooling_states { @@ -535,7 +536,7 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal, static int mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) { - char tz_name[THERMAL_NAME_LENGTH]; + char tz_name[40]; int ret; if (gearbox_tz->slot_index) diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index d23f293e28..1e150ce1c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -424,9 +424,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size, if (in_mbox) { reg_size = mlxsw_i2c_get_reg_size(in_mbox); - num = reg_size / mlxsw_i2c->block_size; - if (reg_size % mlxsw_i2c->block_size) - num++; + num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size); if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { dev_err(&client->dev, "Could not acquire lock"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 51eea1f052..e4b25e1874 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -105,6 +105,8 @@ struct mlxsw_pci { u64 free_running_clock_offset; u64 utc_sec_offset; u64 utc_nsec_offset; + bool lag_mode_support; + enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode; struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; struct mlxsw_core *core; @@ -352,14 +354,15 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, } static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info) + struct mlxsw_pci_queue_elem_info *elem_info, + gfp_t gfp) { size_t buf_len = MLXSW_PORT_MAX_MTU; char *wqe = elem_info->elem; struct sk_buff *skb; int err; - skb = netdev_alloc_skb_ip_align(NULL, buf_len); + skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp); if (!skb) return -ENOMEM; @@ -420,7 +423,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_producer_get(q); BUG_ON(!elem_info); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL); if (err) goto rollback; /* Everything is set up, ring doorbell to pass elem to HW */ @@ -640,7 +643,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC); if (err) { dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); goto out; @@ -1311,6 +1314,16 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, profile->cqe_time_stamp_type); } + if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) { + enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode = + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW; + + mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1); + mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode); + mlxsw_pci->lag_mode = lag_mode; + } else { + mlxsw_pci->lag_mode = 
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW; + } return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); } @@ -1586,6 +1599,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, mlxsw_pci->utc_nsec_offset = mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox); + mlxsw_pci->lag_mode_support = + mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox); num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); if (err) @@ -1618,9 +1633,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_config_profile; - /* Some resources depend on unified bridge model, which is configured - * as part of config_profile. Query the resources again to get correct - * values. + /* Some resources depend on details of config_profile, such as unified + * bridge model. Query the resources again to get correct values. */ err = mlxsw_core_resources_query(mlxsw_core, mbox, res); if (err) @@ -1895,6 +1909,14 @@ static u32 mlxsw_pci_read_utc_nsec(void *bus_priv) return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset); } +static enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_pci_lag_mode(void *bus_priv) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + + return mlxsw_pci->lag_mode; +} + static const struct mlxsw_bus mlxsw_pci_bus = { .kind = "pci", .init = mlxsw_pci_init, @@ -1906,6 +1928,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .read_frc_l = mlxsw_pci_read_frc_l, .read_utc_sec = mlxsw_pci_read_utc_sec, .read_utc_nsec = mlxsw_pci_read_utc_nsec, + .lag_mode = mlxsw_pci_lag_mode, .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ae556ddd76..25b294fdeb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -38,18 +38,18 @@ static const struct mlxsw_reg_info mlxsw_reg_##_name = { \ MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN); -/* reg_sgcr_llb - * Link Local Broadcast (Default=0) - * When set, all Link Local packets (224.0.0.X) will be treated as broadcast - * packets and ignore the IGMP snooping entries. +/* reg_sgcr_lag_lookup_pgt_base + * Base address used for lookup in PGT table + * Supported when CONFIG_PROFILE.lag_mode = 1 + * Note: when IGCR.ddd_lag_mode=0, the address shall be aligned to 8 entries. 
* Access: RW */ -MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1); +MLXSW_ITEM32(reg, sgcr, lag_lookup_pgt_base, 0x0C, 0, 16); -static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb) +static inline void mlxsw_reg_sgcr_pack(char *payload, u16 lag_lookup_pgt_base) { MLXSW_REG_ZERO(sgcr, payload); - mlxsw_reg_sgcr_llb_set(payload, !!llb); + mlxsw_reg_sgcr_lag_lookup_pgt_base_set(payload, lag_lookup_pgt_base); } /* SPAD - Switch Physical Address Register @@ -9551,7 +9551,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1); #define MLXSW_REG_MTBR_ID 0x900F #define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */ #define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */ -#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */ +#define MLXSW_REG_MTBR_REC_MAX_COUNT 1 #define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \ MLXSW_REG_MTBR_REC_LEN * \ MLXSW_REG_MTBR_REC_MAX_COUNT) @@ -9597,12 +9597,12 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index, - u16 base_sensor_index, u8 num_rec) + u16 base_sensor_index) { MLXSW_REG_ZERO(mtbr, payload); mlxsw_reg_mtbr_slot_index_set(payload, slot_index); mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); - mlxsw_reg_mtbr_num_rec_set(payload, num_rec); + mlxsw_reg_mtbr_num_rec_set(payload, 1); } /* Error codes from temperatute reading */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9dbd5edff0..cec72d99d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2692,6 +2692,63 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->trap); } +static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) +{ + char sgcr_pl[MLXSW_REG_SGCR_LEN]; + u16 max_lag; + int err; + + if (mlxsw_core_lag_mode(mlxsw_sp->core) != + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) + return 0; + + err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); + if (err) + return err; + + /* In DDD mode, which we by default use, each LAG entry is 8 PGT + * entries. The LAG table address needs to be 8-aligned, but that ought + * to be the case, since the LAG table is allocated first. 
+ */ + err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, + max_lag * 8); + if (err) + return err; + if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { + err = -EINVAL; + goto err_mid_alloc_range; + } + + mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl); + if (err) + goto err_mid_alloc_range; + + return 0; + +err_mid_alloc_range: + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, + max_lag * 8); + return err; +} + +static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) +{ + u16 max_lag; + int err; + + if (mlxsw_core_lag_mode(mlxsw_sp->core) != + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) + return; + + err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); + if (err) + return; + + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, + max_lag * 8); +} + #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) @@ -2723,16 +2780,27 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) return -EIO; + err = mlxsw_sp_lag_pgt_init(mlxsw_sp); + if (err) + return err; + mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper), GFP_KERNEL); - if (!mlxsw_sp->lags) - return -ENOMEM; + if (!mlxsw_sp->lags) { + err = -ENOMEM; + goto err_kcalloc; + } return 0; + +err_kcalloc: + mlxsw_sp_lag_pgt_fini(mlxsw_sp); + return err; } static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) { + mlxsw_sp_lag_pgt_fini(mlxsw_sp); kfree(mlxsw_sp->lags); } @@ -3113,6 +3181,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_pgt_init; } + /* Initialize before FIDs so that the LAG table is at the start of PGT + * and 8-aligned without overallocation. + */ + err = mlxsw_sp_lag_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); + goto err_lag_init; + } + err = mlxsw_sp_fids_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); @@ -3143,12 +3220,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_buffers_init; } - err = mlxsw_sp_lag_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); - goto err_lag_init; - } - /* Initialize SPAN before router and switchdev, so that those components * can call mlxsw_sp_span_respin(). 
*/ @@ -3300,8 +3371,6 @@ err_counter_pool_init: err_switchdev_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: - mlxsw_sp_lag_fini(mlxsw_sp); -err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); err_buffers_init: mlxsw_sp_devlink_traps_fini(mlxsw_sp); @@ -3312,6 +3381,8 @@ err_traps_init: err_policers_init: mlxsw_sp_fids_fini(mlxsw_sp); err_fids_init: + mlxsw_sp_lag_fini(mlxsw_sp); +err_lag_init: mlxsw_sp_pgt_fini(mlxsw_sp); err_pgt_init: mlxsw_sp_kvdl_fini(mlxsw_sp); @@ -3477,12 +3548,12 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); - mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_devlink_traps_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_policers_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); + mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_pgt_fini(mlxsw_sp); mlxsw_sp_kvdl_fini(mlxsw_sp); mlxsw_sp_parsing_fini(mlxsw_sp); @@ -3526,6 +3597,7 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { }, .used_cqe_time_stamp_type = 1, .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, + .lag_mode_prefer_sw = true, }; /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs @@ -3553,6 +3625,7 @@ static const struct mlxsw_config_profile mlxsw_sp4_config_profile = { }, .used_cqe_time_stamp_type = 1, .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, + .lag_mode_prefer_sw = true, }; static void diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 02ca2871b6..c70333b460 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -212,6 +212,7 @@ struct mlxsw_sp { struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */ struct mlxsw_sp_pgt *pgt; bool pgt_smpe_index_valid; + u16 lag_pgt_base; }; struct mlxsw_sp_ptp_ops { @@ -1480,7 +1481,7 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core); /* spectrum_pgt.c */ int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid); void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base); -int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, +int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *mid_base, u16 count); void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index b1178b7a7f..99eeafdc8d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -45,8 +45,7 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp, } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = { - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER, MLXSW_AFK_ELEMENT_SRC_IP_0_31, MLXSW_AFK_ELEMENT_DST_IP_0_31, }; @@ -89,8 +88,9 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam) } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = { + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, MLXSW_AFK_ELEMENT_SRC_IP_96_127, MLXSW_AFK_ELEMENT_SRC_IP_64_95, MLXSW_AFK_ELEMENT_SRC_IP_32_63, @@ -142,6 +142,8 @@ static void 
mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_mr_route_key *key) { + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER, + key->vrid, GENMASK(11, 0)); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, (char *) &key->source.addr4, (char *) &key->source_mask.addr4, 4); @@ -154,6 +156,13 @@ static void mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_mr_route_key *key) { + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + key->vrid, GENMASK(3, 0)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, + key->vrid >> 4, GENMASK(3, 0)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, + key->vrid >> 8, GENMASK(3, 0)); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, &key->source.addr6.s6_addr[0x0], &key->source_mask.addr6.s6_addr[0x0], 4); @@ -189,11 +198,6 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, rulei = mlxsw_sp_acl_rule_rulei(rule); rulei->priority = priority; - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, - key->vrid, GENMASK(7, 0)); - mlxsw_sp_acl_rulei_keymask_u32(rulei, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - key->vrid >> 8, GENMASK(3, 0)); switch (key->proto) { case MLXSW_SP_L3_PROTO_IPV4: return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index cb746a43b2..eaad786056 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -171,20 +171,22 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = { + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; @@ -220,7 +222,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), - MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4), + MLXSW_AFK_BLOCK(0x3D, mlxsw_sp_afk_element_info_ipv4_5), MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), @@ -322,33 +324,33 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */ }; 
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = { - MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), - MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x11, mlxsw_sp_afk_element_info_mac_1), MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), - MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b), - MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), - MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1), MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), - MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b), + MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b), MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b), MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), - MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x90, mlxsw_sp_afk_element_info_l4_0), MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c index ee59c79156..50e591420b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -24,7 +24,7 @@ struct mlxsw_sp_counter_pool { spinlock_t counter_pool_lock; /* Protects counter pool allocations */ atomic_t active_entries_count; unsigned int sub_pools_count; - struct mlxsw_sp_counter_sub_pool sub_pools[]; + struct mlxsw_sp_counter_sub_pool sub_pools[] __counted_by(sub_pools_count); }; static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 472830d07a..0f29e9c194 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -619,7 +619,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) int i; for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", + snprintf(*p, ETH_GSTRING_LEN, "%.28s_%d", mlxsw_sp_port_hw_tc_stats[i].str, tc); *p += ETH_GSTRING_LEN; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 9df0984747..e954b8cd2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c 
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -320,6 +320,14 @@ mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family) return fid_family->end_index - fid_family->start_index + 1; } +static u16 +mlxsw_sp_fid_family_pgt_size(const struct mlxsw_sp_fid_family *fid_family) +{ + u16 num_fids = mlxsw_sp_fid_family_num_fids(fid_family); + + return num_fids * fid_family->nr_flood_tables; +} + static u16 mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family, const struct mlxsw_sp_flood_table *flood_table, @@ -1068,8 +1076,6 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { #define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2) #define MLXSW_SP_FID_RFID_MAX (11 * 1024) -#define MLXSW_SP_FID_8021Q_PGT_BASE 0 -#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX) static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = { { @@ -1434,7 +1440,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = { .ops = &mlxsw_sp_fid_8021q_ops, .flood_rsp = false, .bridge_type = MLXSW_REG_BRIDGE_TYPE_0, - .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE, .smpe_index_valid = false, }; @@ -1448,7 +1453,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = { .rif_type = MLXSW_SP_RIF_TYPE_FID, .ops = &mlxsw_sp_fid_8021d_ops, .bridge_type = MLXSW_REG_BRIDGE_TYPE_1, - .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE, .smpe_index_valid = false, }; @@ -1490,7 +1494,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = { .ops = &mlxsw_sp_fid_8021q_ops, .flood_rsp = false, .bridge_type = MLXSW_REG_BRIDGE_TYPE_0, - .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE, .smpe_index_valid = true, }; @@ -1504,7 +1507,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = { .rif_type = MLXSW_SP_RIF_TYPE_FID, .ops = &mlxsw_sp_fid_8021d_ops, .bridge_type = MLXSW_REG_BRIDGE_TYPE_1, - .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE, .smpe_index_valid = true, }; @@ -1654,14 +1656,10 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family, enum mlxsw_sp_flood_type packet_type = flood_table->packet_type; struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; const int *sfgc_packet_types; - u16 num_fids, mid_base; + u16 mid_base; int err, i; mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0); - num_fids = mlxsw_sp_fid_family_num_fids(fid_family); - err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids); - if (err) - return err; sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type]; for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) { @@ -1675,57 +1673,56 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family, err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl); if (err) - goto err_reg_write; + return err; } return 0; - -err_reg_write: - mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids); - return err; -} - -static void -mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family, - const struct mlxsw_sp_flood_table *flood_table) -{ - struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; - u16 num_fids, mid_base; - - mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0); - num_fids = mlxsw_sp_fid_family_num_fids(fid_family); - mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids); } static int mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family) { + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + u16 pgt_size; + int err; int i; + if (!fid_family->nr_flood_tables) + return 0; + + pgt_size = 
mlxsw_sp_fid_family_pgt_size(fid_family); + err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &fid_family->pgt_base, + pgt_size); + if (err) + return err; + for (i = 0; i < fid_family->nr_flood_tables; i++) { const struct mlxsw_sp_flood_table *flood_table; - int err; flood_table = &fid_family->flood_tables[i]; err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table); if (err) - return err; + goto err_flood_table_init; } return 0; + +err_flood_table_init: + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size); + return err; } static void mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family) { - int i; + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + u16 pgt_size; - for (i = 0; i < fid_family->nr_flood_tables; i++) { - const struct mlxsw_sp_flood_table *flood_table; + if (!fid_family->nr_flood_tables) + return; - flood_table = &fid_family->flood_tables[i]; - mlxsw_sp_fid_flood_table_fini(fid_family, flood_table); - } + pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family); + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size); } static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c index 7dd3dba0fa..4ef81bac17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c @@ -54,25 +54,15 @@ void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base) mutex_unlock(&mlxsw_sp->pgt->lock); } -int -mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count) +int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *p_mid_base, + u16 count) { - unsigned int idr_cursor; + unsigned int mid_base; int i, err; mutex_lock(&mlxsw_sp->pgt->lock); - /* This function is supposed to be called several times as part of - * driver init, in specific order. Verify that the mid_index is the - * first free index in the idr, to be able to free the indexes in case - * of error. 
- */ - idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); - if (WARN_ON(idr_cursor != mid_base)) { - err = -EINVAL; - goto err_idr_cursor; - } - + mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); for (i = 0; i < count; i++) { err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL, mid_base, mid_base + count, GFP_KERNEL); @@ -81,12 +71,12 @@ mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count) } mutex_unlock(&mlxsw_sp->pgt->lock); + *p_mid_base = mid_base; return 0; err_idr_alloc_cyclic: for (i--; i >= 0; i--) idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i); -err_idr_cursor: mutex_unlock(&mlxsw_sp->pgt->lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ae2fb9efbc..39e6d941fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3107,7 +3107,7 @@ struct mlxsw_sp_nexthop_group_info { gateway:1, /* routes using the group use a gateway */ is_resilient:1; struct list_head list; /* member in nh_res_grp_list */ - struct mlxsw_sp_nexthop nexthops[]; + struct mlxsw_sp_nexthop nexthops[] __counted_by(count); }; static struct mlxsw_sp_rif * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index b3472fb946..af50ff9e5f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -31,7 +31,7 @@ struct mlxsw_sp_span { refcount_t policer_id_base_ref_count; atomic_t active_entries_count; int entries_count; - struct mlxsw_sp_span_entry entries[]; + struct mlxsw_sp_span_entry entries[] __counted_by(entries_count); }; struct mlxsw_sp_span_analyzed_port { diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index c11b118dc4..ddd87ef71c 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1228,7 +1228,7 @@ err_mem_region: return err; } -static int ks8842_remove(struct platform_device *pdev) +static void ks8842_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ks8842_adapter *adapter = netdev_priv(netdev); @@ -1239,7 +1239,6 @@ static int ks8842_remove(struct platform_device *pdev) iounmap(adapter->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - return 0; } @@ -1248,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = { .name = DRV_NAME, }, .probe = ks8842_probe, - .remove = ks8842_remove, + .remove_new = ks8842_remove, }; module_platform_driver(ks8842_platform_driver); diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c index 7f49042484..2a7f298542 100644 --- a/drivers/net/ethernet/micrel/ks8851_par.c +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -327,11 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev) return ks8851_probe_common(netdev, dev, msg_enable); } -static int ks8851_remove_par(struct platform_device *pdev) +static void ks8851_remove_par(struct platform_device *pdev) { ks8851_remove_common(&pdev->dev); - - return 0; } static const struct of_device_id ks8851_match_table[] = { @@ -347,7 +345,7 @@ static struct platform_driver ks8851_driver = { .pm = &ks8851_pm_ops, }, .probe = ks8851_probe_par, - .remove = ks8851_remove_par, + .remove_new = ks8851_remove_par, }; module_platform_driver(ks8851_driver); diff --git 
a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 2db5949b4c..6961cfc55f 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1047,7 +1047,8 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev, BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | - BIT(HWTSTAMP_FILTER_ALL); + BIT(HWTSTAMP_FILTER_ALL) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index c81cdeb4d4..45e209a7d0 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1466,9 +1466,15 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) static void lan743x_phy_close(struct lan743x_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct phy_device *phydev = netdev->phydev; phy_stop(netdev->phydev); phy_disconnect(netdev->phydev); + + /* using phydev here as phy_disconnect NULLs netdev->phydev */ + if (phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); + } static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) @@ -1864,6 +1870,50 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) return last_head - last_tail - 1; } +static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter, + int rx_ts_config) +{ + int channel_number; + int index; + u32 data; + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + channel_number = adapter->rx[index].channel_number; + data = lan743x_csr_read(adapter, RX_CFG_B(channel_number)); + data &= RX_CFG_B_TS_MASK_; + data |= rx_ts_config; + lan743x_csr_write(adapter, RX_CFG_B(channel_number), + data); + } +} + +int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter, + int rx_filter) +{ + u32 data; + + switch (rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_EVENT: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_DESCR_EN_); + data = lan743x_csr_read(adapter, PTP_RX_TS_CFG); + data |= PTP_RX_TS_CFG_EVENT_MSGS_; + lan743x_csr_write(adapter, PTP_RX_TS_CFG, data); + break; + case HWTSTAMP_FILTER_NONE: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_NONE_); + break; + case HWTSTAMP_FILTER_ALL: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_ALL_RX_); + break; + default: + return -ERANGE; + } + return 0; +} + void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, bool enable_timestamping, bool enable_onestep_sync) @@ -2938,7 +2988,6 @@ static int lan743x_rx_open(struct lan743x_rx *rx) data |= RX_CFG_B_RX_PAD_2_; data &= ~RX_CFG_B_RX_RING_LEN_MASK_; data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); - data |= RX_CFG_B_TS_ALL_RX_; if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) data |= RX_CFG_B_RDMABL_512_; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 52609fc13a..b648461787 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -522,6 +522,8 @@ (((u32)(rx_latency)) & 0x0000FFFF) #define PTP_CAP_INFO (0x0A60) #define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4) +#define PTP_RX_TS_CFG (0x0A68) +#define PTP_RX_TS_CFG_EVENT_MSGS_ GENMASK(3, 0) #define PTP_TX_MOD (0x0AA4) #define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000) @@ -657,6 +659,9 @@ #define 
RX_CFG_B(channel) (0xC44 + ((channel) << 6)) #define RX_CFG_B_TS_ALL_RX_ BIT(29) +#define RX_CFG_B_TS_DESCR_EN_ BIT(28) +#define RX_CFG_B_TS_NONE_ 0 +#define RX_CFG_B_TS_MASK_ (0xCFFFFFFF) #define RX_CFG_B_RX_PAD_MASK_ (0x03000000) #define RX_CFG_B_RX_PAD_0_ (0x00000000) #define RX_CFG_B_RX_PAD_2_ (0x02000000) @@ -991,6 +996,9 @@ struct lan743x_rx { struct sk_buff *skb_head, *skb_tail; }; +int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter, + int rx_filter); + /* SGMII Link Speed Duplex status */ enum lan743x_sgmii_lsd { POWER_DOWN = 0, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 39e1066ecd..2f04bc77a1 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1493,6 +1493,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) temp = lan743x_csr_read(adapter, PTP_TX_MOD2); temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_; lan743x_csr_write(adapter, PTP_TX_MOD2, temp); + + /* Default Timestamping */ + lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE); + lan743x_ptp_enable(adapter); lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); lan743x_csr_write(adapter, PTP_INT_EN_SET, @@ -1653,6 +1657,9 @@ static void lan743x_ptp_disable(struct lan743x_adapter *adapter) { struct lan743x_ptp *ptp = &adapter->ptp; + /* Disable Timestamping */ + lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE); + mutex_lock(&ptp->command_lock); if (!lan743x_ptp_is_enabled(adapter)) { netif_warn(adapter, drv, adapter->netdev, @@ -1785,6 +1792,8 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) break; } + ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter); + if (!ret) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c index 41fa2523d9..5f2cd9a8cf 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) /* Now, set PGIDs for each active LAG */ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { - struct net_device *bond = lan966x->ports[lag]->bond; + struct lan966x_port *port = lan966x->ports[lag]; int num_active_ports = 0; + struct net_device *bond; unsigned long bond_mask; u8 aggr_idx[16]; - if (!bond || (visited & BIT(lag))) + if (!port || !port->bond || (visited & BIT(lag))) continue; + bond = port->bond; bond_mask = lan966x_lag_get_mask(lan966x, bond); for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { struct lan966x_port *port = lan966x->ports[p]; + if (!port) + continue; + lan_wr(ANA_PGID_PGID_SET(bond_mask), lan966x, ANA_PGID(p)); if (port->lag_tx_active) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 0d6e79af24..2635ef8958 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -671,7 +671,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) skb = netdev_alloc_skb(dev, len); if (unlikely(!skb)) { netdev_err(dev, "Unable to allocate sk_buff\n"); - err = -ENOMEM; break; } buf_len = len - ETH_FCS_LEN; @@ -1261,7 +1260,7 @@ cleanup_ports: return err; } -static int lan966x_remove(struct platform_device *pdev) +static void lan966x_remove(struct platform_device *pdev) { struct lan966x *lan966x = platform_get_drvdata(pdev); @@ -1280,13 +1279,11 @@ static int lan966x_remove(struct platform_device *pdev) lan966x_ptp_deinit(lan966x); debugfs_remove_recursive(lan966x->debugfs_root); - - return 0; } static struct platform_driver lan966x_driver = { .probe = lan966x_probe, - .remove = lan966x_remove, + .remove_new = lan966x_remove, .driver = { .name = "lan966x-switch", .of_match_table = lan966x_match, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 92108d3540..2e83bbb947 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port) lan966x_taprio_speed_set(port, config->speed); /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the - * port speed for QSGMII ports. + * port speed for QSGMII or SGMII ports. 
*/ - if (phy_interface_num_ports(config->portmode) == 4) + if (phy_interface_num_ports(config->portmode) == 4 || + config->portmode == PHY_INTERFACE_MODE_SGMII) mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); lan_wr(config->duplex | mode, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 01f3a3a41c..37d2584b48 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1012,8 +1012,7 @@ static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data) return; for (idx = 0; idx < sparx5->num_ethtool_stats; idx++) - strncpy(data + idx * ETH_GSTRING_LEN, - sparx5->stats_layout[idx], ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", sparx5->stats_layout[idx]); } static void sparx5_get_sset_data(struct net_device *ndev, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index dc9af480bf..3c066b62e6 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sparx5); sparx5->pdev = pdev; sparx5->dev = &pdev->dev; + spin_lock_init(&sparx5->tx_lock); /* Do switch core reset if available */ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); @@ -911,7 +912,7 @@ cleanup_pnode: return err; } -static int mchp_sparx5_remove(struct platform_device *pdev) +static void mchp_sparx5_remove(struct platform_device *pdev) { struct sparx5 *sparx5 = platform_get_drvdata(pdev); @@ -931,8 +932,6 @@ static int mchp_sparx5_remove(struct platform_device *pdev) /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); destroy_workqueue(sparx5->mact_queue); - - return 0; } static const struct of_device_id mchp_sparx5_match[] = { @@ -943,7 +942,7 @@ MODULE_DEVICE_TABLE(of, mchp_sparx5_match); static struct platform_driver mchp_sparx5_driver = { .probe = mchp_sparx5_probe, - .remove = mchp_sparx5_remove, + .remove_new = mchp_sparx5_remove, .driver = { .name = "sparx5-switch", .of_match_table = mchp_sparx5_match, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 6f565c0c0c..316fed5f27 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -280,6 +280,7 @@ struct sparx5 { int xtr_irq; /* Frame DMA */ int fdma_irq; + spinlock_t tx_lock; /* lock for frame transmission */ struct sparx5_rx rx; struct sparx5_tx tx; /* PTP */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 6db6ac6a3b..ac7e1cffbc 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) } skb_tx_timestamp(skb); + spin_lock(&sparx5->tx_lock); if (sparx5->fdma_irq > 0) ret = sparx5_fdma_xmit(sparx5, ifh, skb); else ret = sparx5_inject(sparx5, ifh, skb, dev); + spin_unlock(&sparx5->tx_lock); if (ret == -EBUSY) goto busy; diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c index c2c3397c58..59bfbda29b 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c +++ 
b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c @@ -300,7 +300,7 @@ static int vcap_show_admin(struct vcap_control *vctrl, vcap_show_admin_info(vctrl, admin, out); list_for_each_entry(elem, &admin->rules, list) { vrule = vcap_decode_rule(elem); - if (IS_ERR_OR_NULL(vrule)) { + if (IS_ERR(vrule)) { ret = PTR_ERR(vrule); break; } diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 48ea4aeeea..fc3d2903a8 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2687,8 +2687,9 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; ndev->vlan_features = ndev->features; - ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; + xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); err = register_netdev(ndev); if (err) { diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 3da99b6279..96dc69e714 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -558,7 +558,7 @@ irq_map_fail: return ret; } -static int moxart_remove(struct platform_device *pdev) +static void moxart_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -566,8 +566,6 @@ static int moxart_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); - - return 0; } static const struct of_device_id moxart_mac_match[] = { @@ -578,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match); static struct platform_driver moxart_mac_driver = { .probe = moxart_mac_probe, - .remove = moxart_remove, + .remove_new = moxart_remove, .driver = { .name = "moxart-ethernet", .of_match_table = moxart_mac_match, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 151b424653..993212c3a7 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -392,7 +392,7 @@ out_free_devlink: return err; } -static int mscc_ocelot_remove(struct platform_device *pdev) +static void mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); @@ -408,13 +408,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev) unregister_switchdev_notifier(&ocelot_switchdev_nb); unregister_netdevice_notifier(&ocelot_netdevice_nb); devlink_free(ocelot->devlink); - - return 0; } static struct platform_driver mscc_ocelot_driver = { .probe = mscc_ocelot_probe, - .remove = mscc_ocelot_remove, + .remove_new = mscc_ocelot_remove, .driver = { .name = "ocelot-switch", .of_match_table = mscc_ocelot_match, diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index 3f371faeb6..2b6e097df2 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -227,7 +227,7 @@ MODULE_ALIAS("platform:jazzsonic"); #include "sonic.c" -static int jazz_sonic_device_remove(struct platform_device *pdev) +static void jazz_sonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); @@ -237,13 +237,11 @@ static int jazz_sonic_device_remove(struct platform_device *pdev) 
lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); - - return 0; } static struct platform_driver jazz_sonic_driver = { .probe = jazz_sonic_probe, - .remove = jazz_sonic_device_remove, + .remove_new = jazz_sonic_device_remove, .driver = { .name = jazz_sonic_string, }, diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index b16f7c830f..2fc63860db 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -532,7 +532,7 @@ MODULE_ALIAS("platform:macsonic"); #include "sonic.c" -static int mac_sonic_platform_remove(struct platform_device *pdev) +static void mac_sonic_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); @@ -541,13 +541,11 @@ static int mac_sonic_platform_remove(struct platform_device *pdev) dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); free_netdev(dev); - - return 0; } static struct platform_driver mac_sonic_platform_driver = { .probe = mac_sonic_platform_probe, - .remove = mac_sonic_platform_remove, + .remove_new = mac_sonic_platform_remove, .driver = { .name = "macsonic", }, diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 52fef34d43..8943e72443 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -249,7 +249,7 @@ MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); #include "sonic.c" -static int xtsonic_device_remove(struct platform_device *pdev) +static void xtsonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local *lp = netdev_priv(dev); @@ -260,13 +260,11 @@ static int xtsonic_device_remove(struct platform_device *pdev) lp->descriptors, lp->descriptors_laddr); release_region (dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); - - return 0; } static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, - .remove = xtsonic_device_remove, + .remove_new = xtsonic_device_remove, .driver = { .name = xtsonic_string, }, diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index b1f026b81d..cc54faca22 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -378,6 +378,34 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, /* Encryption */ switch (x->props.ealgo) { case SADB_EALG_NONE: + /* The xfrm descriptor for CHACHA20_POLY1305 does not set the algorithm id, which + * is the default value SADB_EALG_NONE. In the branch of SADB_EALG_NONE, driver + * uses algorithm name to identify CHACHA20_POLY1305's algorithm.
+ */ + if (x->aead && !strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)")) { + if (nn->pdev->device != PCI_DEVICE_ID_NFP3800) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported encryption algorithm for offload"); + return -EINVAL; + } + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "ICV must be 128bit with CHACHA20_POLY1305"); + return -EINVAL; + } + + /* Aead->alg_key_len includes 32-bit salt */ + if (x->aead->alg_key_len - 32 != 256) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported CHACHA20 key length"); + return -EINVAL; + } + + /* The CHACHA20's mode is not configured */ + cfg->ctrl_word.hash = NFP_IPSEC_HASH_POLY1305_128; + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_CHACHA20; + break; + } + fallthrough; case SADB_EALG_NULL: cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL; @@ -427,6 +455,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, } if (x->aead) { + int key_offset = 0; int salt_len = 4; key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE); @@ -437,9 +466,19 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, return -EINVAL; } - for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++) - cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key + - sizeof(cfg->ciph_key[0]) * i); + /* The CHACHA20's key order needs to be adjusted based on hardware design. + * Other's key order: {K0, K1, K2, K3, K4, K5, K6, K7} + * CHACHA20's key order: {K4, K5, K6, K7, K0, K1, K2, K3} + */ + if (!strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)")) + key_offset = key_len / sizeof(cfg->ciph_key[0]) >> 1; + + for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++) { + int index = (i + key_offset) % (key_len / sizeof(cfg->ciph_key[0])); + + cfg->ciph_key[index] = get_unaligned_be32(x->aead->alg_key + + sizeof(cfg->ciph_key[0]) * i); + } /* Load up the salt */ cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len); diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2967bab725..15180538b8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask); return; + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. + */ case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: - mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val); - mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask); + if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) { + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val << 16); + /* The mask of mangle action is inverse mask, + * so clear the dest tp port with 0xFFFF to + * instead of rotate-left operation. 
+ */ + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF); + } + if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) { + mangle_action->mangle.offset = 0; + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val); + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask); + } return; default: @@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv, { struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_fl_ct_flow_entry *ct_entry; + struct flow_action_entry *ct_goto; struct nfp_fl_ct_zone_entry *zt; + struct flow_action_entry *act; bool wildcarded = false; struct flow_match_ct ct; - struct flow_action_entry *ct_goto; + int i; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_REDIRECT_INGRESS: + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_MIRRED_INGRESS: + if (act->dev->rtnl_link_ops && + !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: out port is openvswitch internal port"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + } flow_rule_match_ct(rule, &ct); if (!ct.mask->ct_zone) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index e522845c7c..0d7d138d6e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); - if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { + if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) { if (entry->bridge_count || !nfp_flower_is_supported_bridge(netdev)) { nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c index 0cc026b0ae..17381bfc15 100644 --- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c @@ -1070,7 +1070,7 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_repr_inc_rx_stats(netdev, pkt_len); } - skb = build_skb(rxbuf->frag, true_bufsz); + skb = napi_build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c index 5d9db8c2a5..45be6954d5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c +++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c @@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget, nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring); if (xdp_redir) - xdp_do_flush_map(); + xdp_do_flush(); if (tx_ring->wr_ptr_add) nfp_net_tx_xmit_more_flush(tx_ring); diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c index 33b6d74adb..8d78c6faef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c @@ -1189,7 +1189,7 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_repr_inc_rx_stats(netdev, pkt_len); } - skb = build_skb(rxbuf->frag, true_bufsz); + skb = napi_build_skb(rxbuf->frag, true_bufsz); 
if (unlikely(!skb)) { nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index de0a5d5ded..f2085340a1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2588,6 +2588,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) case NFP_NFD_VER_NFD3: netdev->netdev_ops = &nfp_nfd3_netdev_ops; netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; break; case NFP_NFD_VER_NFDK: netdev->netdev_ops = &nfp_nfdk_netdev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 48a74accbb..77bf4198db 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -18,7 +18,7 @@ struct nfp_port; */ struct nfp_reprs { unsigned int num_reprs; - struct net_device __rcu *reprs[]; + struct net_device __rcu *reprs[] __counted_by(num_reprs); }; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 33b4c28563..3f10c5365c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); const u32 barcfg_msix_xpb = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) | NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( NFP_CPP_TARGET_ISLAND_XPB); const u32 barcfg_explicit[4] = { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac049..00264af13b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -241,7 +241,7 @@ struct nfp_eth_table { u64 link_modes_supp[2]; u64 link_modes_ad[2]; - } ports[]; + } ports[] __counted_by(count); }; struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index ce7492a6a9..279ea0b569 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -159,7 +159,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) if (!res) return ERR_PTR(-ENOMEM); - strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + strscpy(res->name, name, sizeof(res->name)); dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, NFP_RESOURCE_TBL_BASE, diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index ba27bbc68f..fa1f78b03c 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -683,7 +683,7 @@ static int nixge_poll(struct napi_struct *napi, int budget) if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { /* If there's more, reschedule, but clear */ nixge_dma_write_reg(priv, 
XAXIDMA_RX_SR_OFFSET, status); - napi_reschedule(napi); + napi_schedule(napi); } else { /* if not, turn on RX IRQs again ... */ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); @@ -755,8 +755,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); - if (napi_schedule_prep(&priv->napi)) - __napi_schedule(&priv->napi); + napi_schedule(&priv->napi); goto out; } if (!(status & XAXIDMA_IRQ_ALL_MASK)) { @@ -1397,7 +1396,7 @@ free_netdev: return err; } -static int nixge_remove(struct platform_device *pdev) +static void nixge_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct nixge_priv *priv = netdev_priv(ndev); @@ -1412,13 +1411,11 @@ static int nixge_remove(struct platform_device *pdev) mdiobus_unregister(priv->mii_bus); free_netdev(ndev); - - return 0; } static struct platform_driver nixge_driver = { .probe = nixge_probe, - .remove = nixge_remove, + .remove_new = nixge_remove, .driver = { .name = "nixge", .of_match_table = nixge_dt_ids, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 1a4a272f4c..dd3e58a131 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1417,7 +1417,7 @@ err_exit: return ret; } -static int lpc_eth_drv_remove(struct platform_device *pdev) +static void lpc_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat = netdev_priv(ndev); @@ -1436,8 +1436,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -1505,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match); static struct platform_driver lpc_eth_driver = { .probe = lpc_eth_drv_probe, - .remove = lpc_eth_drv_remove, + .remove_new = lpc_eth_drv_remove, #ifdef CONFIG_PM .suspend = lpc_eth_drv_suspend, .resume = lpc_eth_drv_resume, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index d6ce113a42..fa4237c27e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -392,6 +392,10 @@ static void ionic_remove(struct pci_dev *pdev) del_timer_sync(&ionic->watchdog_timer); if (ionic->lif) { + /* prevent adminq cmds if already known as down */ + if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) + set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state); + ionic_lif_unregister(ionic->lif); ionic_devlink_unregister(ionic); ionic_lif_deinit(ionic->lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index c06576f439..22ab0a44fa 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -321,6 +321,7 @@ void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) { + idev->opcode = cmd->cmd.opcode; memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); iowrite32(0, &idev->dev_cmd_regs->done); iowrite32(1, &idev->dev_cmd_regs->doorbell); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index bd2d4a26f5..fd112bee4d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ 
b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -7,6 +7,7 @@ #include #include #include +#include #include "ionic_if.h" #include "ionic_regs.h" @@ -152,6 +153,7 @@ struct ionic_dev { bool fw_hb_ready; bool fw_status_ready; u8 fw_generation; + u8 opcode; u64 __iomem *db_pages; dma_addr_t phy_db_pages; @@ -217,7 +219,7 @@ struct ionic_desc_info { }; unsigned int bytes; unsigned int nbufs; - struct ionic_buf_info bufs[IONIC_MAX_FRAGS]; + struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; ionic_desc_cb cb; void *cb_arg; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index c9bd2c57a3..075e0e3fc2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3238,6 +3238,9 @@ static void ionic_lif_reset(struct ionic_lif *lif) { struct ionic_dev *idev = &lif->ionic->idev; + if (!ionic_is_fw_running(idev)) + return; + mutex_lock(&lif->ionic->dev_cmd_lock); ionic_dev_cmd_lif_reset(idev, lif->index); ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); @@ -3831,6 +3834,18 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) qtype, qti->max_sg_elems); dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", qtype, qti->sg_desc_stride); + + if (qti->max_sg_elems >= IONIC_MAX_FRAGS) { + qti->max_sg_elems = IONIC_MAX_FRAGS - 1; + dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n", + qtype, qti->max_sg_elems); + } + + if (qti->max_sg_elems > MAX_SKB_FRAGS) { + qti->max_sg_elems = MAX_SKB_FRAGS; + dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n", + qtype, qti->max_sg_elems); + } } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 1dc79cecc5..83c413a10f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -410,22 +410,28 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, do_msg); } -int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +static int __ionic_adminq_post_wait(struct ionic_lif *lif, + struct ionic_admin_ctx *ctx, + const bool do_msg) { int err; + if (!ionic_is_fw_running(&lif->ionic->idev)) + return 0; + err = ionic_adminq_post(lif, ctx); - return ionic_adminq_wait(lif, ctx, err, true); + return ionic_adminq_wait(lif, ctx, err, do_msg); } -int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { - int err; - - err = ionic_adminq_post(lif, ctx); + return __ionic_adminq_post_wait(lif, ctx, true); +} - return ionic_adminq_wait(lif, ctx, err, false); +int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + return __ionic_adminq_post_wait(lif, ctx, false); } static void ionic_dev_cmd_clean(struct ionic *ionic) @@ -465,7 +471,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds, */ max_wait = jiffies + (max_seconds * HZ); try_again: - opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode); + opcode = idev->opcode; start_time = jiffies; for (fw_up = ionic_is_fw_running(idev); !done && fw_up && time_before(jiffies, max_wait); @@ -554,8 +560,8 @@ int ionic_identify(struct ionic *ionic) memset(ident, 0, sizeof(*ident)); ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX); - strncpy(ident->drv.driver_ver_str, UTS_RELEASE, - sizeof(ident->drv.driver_ver_str) - 1); + 
strscpy(ident->drv.driver_ver_str, UTS_RELEASE, + sizeof(ident->drv.driver_ver_str)); mutex_lock(&ionic->dev_cmd_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 44466e8c5d..ccc1b1d407 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -1243,25 +1243,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); + bool too_many_frags = false; + skb_frag_t *frag; + int desc_bufs; + int chunk_len; + int frag_rem; + int tso_rem; + int seg_rem; + bool encap; + int hdrlen; int ndescs; int err; /* Each desc is mss long max, so a descriptor for each gso_seg */ - if (skb_is_gso(skb)) + if (skb_is_gso(skb)) { ndescs = skb_shinfo(skb)->gso_segs; - else + } else { ndescs = 1; + if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) { + too_many_frags = true; + goto linearize; + } + } - /* If non-TSO, just need 1 desc and nr_frags sg elems */ - if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) + /* If non-TSO, or no frags to check, we're done */ + if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags) return ndescs; - /* Too many frags, so linearize */ - err = skb_linearize(skb); - if (err) - return err; + /* We need to scan the skb to be sure that none of the MTU sized + * packets in the TSO will require more sgs per descriptor than we + * can support. We loop through the frags, add up the lengths for + * a packet, and count the number of sgs used per packet. + */ + tso_rem = skb->len; + frag = skb_shinfo(skb)->frags; + encap = skb->encapsulation; + + /* start with just hdr in first part of first descriptor */ + if (encap) + hdrlen = skb_inner_tcp_all_headers(skb); + else + hdrlen = skb_tcp_all_headers(skb); + seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size); + frag_rem = hdrlen; + + while (tso_rem > 0) { + desc_bufs = 0; + while (seg_rem > 0) { + desc_bufs++; + + /* We add the +1 because we can take buffers for one + * more than we have SGs: one for the initial desc data + * in addition to the SG segments that might follow. 
+ */ + if (desc_bufs > q->max_sg_elems + 1) { + too_many_frags = true; + goto linearize; + } + + if (frag_rem == 0) { + frag_rem = skb_frag_size(frag); + frag++; + } + chunk_len = min(frag_rem, seg_rem); + frag_rem -= chunk_len; + tso_rem -= chunk_len; + seg_rem -= chunk_len; + } + + seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size); + } - stats->linearize++; +linearize: + if (too_many_frags) { + err = skb_linearize(skb); + if (err) + return err; + stats->linearize++; + } return ndescs; } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1d1e183d3a..ed24d6af74 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -233,9 +233,7 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter) cmask = DMA_BIT_MASK(32); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { -#ifndef CONFIG_IA64 mask = DMA_BIT_MASK(35); -#endif } else { mask = DMA_BIT_MASK(39); cmask = mask; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index cdcead614e..f67be4b8ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -3204,8 +3204,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128; - strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); - strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + memcpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + memcpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, @@ -6359,8 +6359,7 @@ static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest) { const char *source_str = &((const char *)buf)[*offset]; - strncpy(dest, source_str, size); - dest[size - 1] = '\0'; + strscpy(dest, source_str, size); *offset += size; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index be5cc8b79b..dad8e617c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -66,12 +66,12 @@ qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, return err; } - err = devlink_fmsg_binary_pair_put(fmsg, "dump_data", - p_dbg_data_buf, dbg_data_buf_size); + devlink_fmsg_binary_pair_put(fmsg, "dump_data", p_dbg_data_buf, + dbg_data_buf_size); vfree(p_dbg_data_buf); - return err; + return 0; } static int diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 95820cf1cd..b6b849a079 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -201,21 +201,6 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { /* Forced speed capabilities maps */ -struct qede_forced_speed_map { - u32 speed; - __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); - - const u32 *cap_arr; - u32 arr_size; -}; - -#define QEDE_FORCED_SPEED_MAP(value) \ -{ \ - .speed = SPEED_##value, \ - .cap_arr = qede_forced_speed_##value, \ - .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \ -} - static const u32 qede_forced_speed_1000[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, @@ -263,28 +248,21 @@ static const u32 qede_forced_speed_100000[] __initconst = { 
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; -static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = { - QEDE_FORCED_SPEED_MAP(1000), - QEDE_FORCED_SPEED_MAP(10000), - QEDE_FORCED_SPEED_MAP(20000), - QEDE_FORCED_SPEED_MAP(25000), - QEDE_FORCED_SPEED_MAP(40000), - QEDE_FORCED_SPEED_MAP(50000), - QEDE_FORCED_SPEED_MAP(100000), +static struct ethtool_forced_speed_map +qede_forced_speed_maps[] __ro_after_init = { + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 1000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 10000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 20000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 25000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 40000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 50000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 100000), }; void __init qede_forced_speed_maps_init(void) { - struct qede_forced_speed_map *map; - u32 i; - - for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) { - map = qede_forced_speed_maps + i; - - linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); - map->cap_arr = NULL; - map->arr_size = 0; - } + ethtool_forced_speed_maps_init(qede_forced_speed_maps, + ARRAY_SIZE(qede_forced_speed_maps)); } /* Ethtool callbacks */ @@ -564,8 +542,8 @@ static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; + const struct ethtool_forced_speed_map *map; struct qede_dev *edev = netdev_priv(dev); - const struct qede_forced_speed_map *map; struct qed_link_output current_link; struct qed_link_params params; u32 i; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 19bb16daf4..3270df7254 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -718,7 +718,7 @@ err_undo_netdev: return ret; } -static int emac_remove(struct platform_device *pdev) +static void emac_remove(struct platform_device *pdev) { struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); @@ -742,8 +742,6 @@ static int emac_remove(struct platform_device *pdev) iounmap(adpt->phy.base); free_netdev(netdev); - - return 0; } static void emac_shutdown(struct platform_device *pdev) @@ -762,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev) static struct platform_driver emac_platform_driver = { .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, .driver = { .name = "qcom-emac", .of_match_table = emac_dt_match, diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 3ceb57408e..8ef5b0241e 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Renesas device configuration +# Renesas network device configuration # config NET_VENDOR_RENESAS @@ -25,9 +25,6 @@ config SH_ETH select PHYLIB help Renesas SuperH Ethernet device driver. - This driver supporting CPUs are: - - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - R8A7740, R8A774x, R8A777x and R8A779x. config RAVB tristate "Renesas Ethernet AVB support" @@ -39,8 +36,6 @@ config RAVB select PHYLIB help Renesas Ethernet AVB device driver. - This driver supports the following SoCs: - - R8A779x. 
config RENESAS_ETHER_SWITCH tristate "Renesas Ethernet Switch support" @@ -51,7 +46,5 @@ config RENESAS_ETHER_SWITCH select PHYLINK help Renesas Ethernet Switch device driver. - This driver supports the following SoCs: - - R8A779Fx. endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index 5920058934..e8fd85b5fe 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile @@ -1,14 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for the Renesas device drivers. +# Makefile for the Renesas network device drivers # obj-$(CONFIG_SH_ETH) += sh_eth.o ravb-objs := ravb_main.o ravb_ptp.o - obj-$(CONFIG_RAVB) += ravb.o rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o - obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8fec0dbbbe..0e3731f50f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2912,7 +2912,7 @@ out_free_netdev: return error; } -static int ravb_remove(struct platform_device *pdev) +static void ravb_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); @@ -2942,8 +2942,6 @@ static int ravb_remove(struct platform_device *pdev) reset_control_assert(priv->rstc); free_netdev(ndev); platform_set_drvdata(pdev, NULL); - - return 0; } static int ravb_wol_setup(struct net_device *ndev) @@ -3086,7 +3084,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = { static struct platform_driver ravb_driver = { .probe = ravb_probe, - .remove = ravb_remove, + .remove_new = ravb_remove, .driver = { .name = "ravb", .pm = &ravb_dev_pm_ops, diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index ae9d8722b7..e77c6ff93d 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1315,6 +1316,7 @@ static int rswitch_phy_device_init(struct rswitch_device *rdev) if (!phydev) goto out; __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); + phydev->mac_managed_pm = true; phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, rdev->etha->phy_interface); @@ -1405,7 +1407,8 @@ static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) static int rswitch_ether_port_init_all(struct rswitch_private *priv) { - int i, err; + unsigned int i; + int err; rswitch_for_each_enabled_port(priv, i) { err = rswitch_ether_port_init_one(priv->rdev[i]); @@ -1790,7 +1793,8 @@ static void rswitch_device_free(struct rswitch_private *priv, int index) static int rswitch_init(struct rswitch_private *priv) { - int i, err; + unsigned int i; + int err; for (i = 0; i < RSWITCH_NUM_PORTS; i++) rswitch_etha_init(priv, i); @@ -1820,7 +1824,7 @@ static int rswitch_init(struct rswitch_private *priv) for (i = 0; i < RSWITCH_NUM_PORTS; i++) { err = rswitch_device_alloc(priv, i); if (err < 0) { - for (i--; i >= 0; i--) + for (; i-- > 0; ) rswitch_device_free(priv, i); goto err_device_alloc; } @@ -1963,7 +1967,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) static void rswitch_deinit(struct rswitch_private *priv) { - int i; + unsigned int i; rswitch_gwca_hw_deinit(priv); rcar_gen4_ptp_unregister(priv->ptp_priv); @@ -1985,7 +1989,7 @@ static void rswitch_deinit(struct rswitch_private *priv) 
rswitch_clock_disable(priv); } -static int renesas_eth_sw_remove(struct platform_device *pdev) +static void renesas_eth_sw_remove(struct platform_device *pdev) { struct rswitch_private *priv = platform_get_drvdata(pdev); @@ -1995,15 +1999,54 @@ static int renesas_eth_sw_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); +} + +static int renesas_eth_sw_suspend(struct device *dev) +{ + struct rswitch_private *priv = dev_get_drvdata(dev); + struct net_device *ndev; + unsigned int i; + + rswitch_for_each_enabled_port(priv, i) { + ndev = priv->rdev[i]->ndev; + if (netif_running(ndev)) { + netif_device_detach(ndev); + rswitch_stop(ndev); + } + if (priv->rdev[i]->serdes->init_count) + phy_exit(priv->rdev[i]->serdes); + } + + return 0; +} + +static int renesas_eth_sw_resume(struct device *dev) +{ + struct rswitch_private *priv = dev_get_drvdata(dev); + struct net_device *ndev; + unsigned int i; + + rswitch_for_each_enabled_port(priv, i) { + phy_init(priv->rdev[i]->serdes); + ndev = priv->rdev[i]->ndev; + if (netif_running(ndev)) { + rswitch_open(ndev); + netif_device_attach(ndev); + } + } return 0; } +static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, + renesas_eth_sw_resume); + static struct platform_driver renesas_eth_sw_driver_platform = { .probe = renesas_eth_sw_probe, - .remove = renesas_eth_sw_remove, + .remove_new = renesas_eth_sw_remove, .driver = { .name = "renesas_eth_sw", + .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), .of_match_table = renesas_eth_sw_of_table, } }; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 04f49a7a58..27c9d3872c 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -20,7 +20,7 @@ else #define rswitch_for_each_enabled_port_continue_reverse(priv, i) \ - for (i--; i >= 0; i--) \ + for (; i-- > 0; ) \ if (priv->rdev[i]->disabled) \ continue; \ else diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 274ea16c0a..475e1e8c1d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3431,7 +3431,7 @@ out_release: return ret; } -static int sh_eth_drv_remove(struct platform_device *pdev) +static void sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct sh_eth_private *mdp = netdev_priv(ndev); @@ -3441,8 +3441,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev) sh_mdio_release(mdp); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -3562,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table); static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, - .remove = sh_eth_drv_remove, + .remove_new = sh_eth_drv_remove, .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index fb59ff9450..e6e130dbe1 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -169,13 +169,11 @@ err_out: * Description: this function calls the main to free the net resources * and calls the platforms hook and release the resources (e.g. mem). 
*/ -static int sxgbe_platform_remove(struct platform_device *pdev) +static void sxgbe_platform_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); sxgbe_drv_remove(ndev); - - return 0; } #ifdef CONFIG_PM @@ -226,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids); static struct platform_driver sxgbe_platform_driver = { .probe = sxgbe_platform_probe, - .remove = sxgbe_platform_remove, + .remove_new = sxgbe_platform_remove, .driver = { .name = SXGBE_RESOURCE_NAME, .pm = &sxgbe_platform_pm_ops, diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 96065dfc74..76356dadf2 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -819,7 +819,7 @@ err_out: return err; } -static int sgiseeq_remove(struct platform_device *pdev) +static void sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); @@ -828,13 +828,11 @@ static int sgiseeq_remove(struct platform_device *pdev) dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma, DMA_BIDIRECTIONAL); free_netdev(dev); - - return 0; } static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = sgiseeq_remove, + .remove_new = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 8d2d7ea2eb..c9e17a8208 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush_map(); + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index c3e2b4a21d..10709d828a 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -1291,10 +1291,11 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act) size_t outlen; int rc; - MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, + MCDI_POPULATE_DWORD_5(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push, MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop, MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap, + MAE_ACTION_SET_ALLOC_IN_DO_NAT, act->do_nat, MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL, act->do_ttl_dec); @@ -1705,8 +1706,10 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx, /* action */ act = &rule->lhs_act; - MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, - MAE_MCDI_ENCAP_TYPE_NONE); + rc = efx_mae_encap_type_to_mae_type(act->tun_type); + if (rc < 0) + return rc; + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc); /* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the * SW path needs to process the packet to update the conntrack tables * on connection establishment (SYN) or termination (FIN, RST). 
@@ -1734,9 +1737,60 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx, return 0; } +static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match); + +static int efx_mae_insert_lhs_action_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule, + u32 prio) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN); + struct efx_tc_lhs_action *act = &rule->lhs_act; + MCDI_DECLARE_STRUCT_PTR(match_crit); + MCDI_DECLARE_STRUCT_PTR(response); + size_t outlen; + int rc; + + match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL), + MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone, + MAE_ACTION_RULE_RESPONSE_DO_RECIRC, + act->rid && !act->zone, + MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE, + MAE_CT_VNI_MODE_ZERO, + MAE_ACTION_RULE_RESPONSE_RECIRC_ID, + act->rid ? act->rid->fw_id : 0, + MAE_ACTION_RULE_RESPONSE_CT_DOMAIN, + act->zone ? act->zone->zone : 0); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_COUNTER_ID, + act->count ? act->count->cnt->fw_id : + MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio); + rc = efx_mae_populate_match_criteria(match_crit, &rule->match); + if (rc) + return rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + rule->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID); + return 0; +} + int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule, u32 prio) { + if (rule->is_ar) + return efx_mae_insert_lhs_action_rule(efx, rule, prio); return efx_mae_insert_lhs_outer_rule(efx, rule, prio); } @@ -1770,6 +1824,8 @@ static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx, int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule) { + if (rule->is_ar) + return efx_mae_delete_rule(efx, rule->fw_id); return efx_mae_remove_lhs_outer_rule(efx, rule); } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d23da96273..7657850222 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -2205,10 +2205,9 @@ int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, goto out_free; } - strncpy(desc, + strscpy(desc, MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION), MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)); - desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0'; } else { desc[0] = '\0'; } diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index f54200f03e..b04fdbb8ae 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -108,11 +108,17 @@ #define PTP_MIN_LENGTH 63 #define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */ -#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0x01, 0x81} /* ff0e::181 */ + +/* ff0e::181 */ +static const struct in6_addr ptp_addr_ipv6 = { { { + 0xff, 
0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } }; + +/* 01-1B-19-00-00-00 */ +static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = { + 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 }; + #define PTP_EVENT_PORT 319 #define PTP_GENERAL_PORT 320 -#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */ /* Annoyingly the format of the version numbers are different between * versions 1 and 2 so it isn't possible to simply look for 1 or 2. @@ -1296,7 +1302,7 @@ static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, struct list_head *filter_list, - struct in6_addr *addr, u16 port, + const struct in6_addr *addr, u16 port, unsigned long expiry) { struct efx_filter_spec spec; @@ -1309,11 +1315,10 @@ static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER; struct efx_filter_spec spec; efx_ptp_init_filter(efx, &spec); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, ptp_addr_ether); spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; spec.ether_type = htons(ETH_P_1588); return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0); @@ -1346,15 +1351,13 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) * PTP over IPv6 and Ethernet */ if (efx_ptp_use_mac_tx_timestamps(efx)) { - struct in6_addr ipv6_addr = {{PTP_ADDR_IPV6}}; - rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast, - &ipv6_addr, PTP_EVENT_PORT, 0); + &ptp_addr_ipv6, PTP_EVENT_PORT, 0); if (rc < 0) goto fail; rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast, - &ipv6_addr, PTP_GENERAL_PORT, 0); + &ptp_addr_ipv6, PTP_GENERAL_PORT, 0); if (rc < 0) goto fail; @@ -1379,9 +1382,7 @@ static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb) ip_hdr(skb)->protocol == IPPROTO_UDP && udp_hdr(skb)->source == htons(PTP_EVENT_PORT); } else if (skb->protocol == htons(ETH_P_IPV6)) { - struct in6_addr mcast_addr = {{PTP_ADDR_IPV6}}; - - return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &mcast_addr) && + return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP && udp_hdr(skb)->source == htons(PTP_EVENT_PORT); } diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 1776f7f8a7..a7346e965b 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush_map(); + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 30ebef8824..82e8891a61 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -642,6 +642,15 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx, return -EEXIST; } break; + case EFX_TC_EM_PSEUDO_OR: + /* old EM corresponds to an OR that has to be unique + * (it must not overlap with any other OR, whether + * direct-EM or pseudo). + */ + NL_SET_ERR_MSG_FMT_MOD(extack, + "%s encap match conflicts with existing pseudo(OR) entry", + em_type ? "Pseudo" : "Direct"); + return -EEXIST; default: /* Unrecognised pseudo-type. 
Just say no */ NL_SET_ERR_MSG_FMT_MOD(extack, "%s encap match conflicts with existing pseudo(%d) entry", @@ -872,6 +881,93 @@ static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr, return false; } +/* A foreign LHS rule has matches on enc_ keys at the TC layer (including an + * implied match on enc_ip_proto UDP). Translate these into non-enc_ keys, + * so that we can use the same MAE machinery as local LHS rules (and so that + * the lhs_rules entries have uniform semantics). It may seem odd to do it + * this way round, given that the corresponding fields in the MAE MCDIs are + * all ENC_, but (a) we don't have enc_L2 or enc_ip_proto in struct + * efx_tc_match_fields and (b) semantically an LHS rule doesn't have inner + * fields so it's just matching on *the* header rather than the outer header. + * Make sure that the non-enc_ keys were not already being matched on, as that + * would imply a rule that needed a triple lookup. (Hardware can do that, + * with OR-AR-CT-AR, but it halves packet rate so we avoid it where possible; + * see efx_tc_flower_flhs_needs_ar().) + */ +static int efx_tc_flower_translate_flhs_match(struct efx_tc_match *match) +{ + int rc = 0; + +#define COPY_MASK_AND_VALUE(_key, _ekey) ({ \ + if (match->mask._key) { \ + rc = -EOPNOTSUPP; \ + } else { \ + match->mask._key = match->mask._ekey; \ + match->mask._ekey = 0; \ + match->value._key = match->value._ekey; \ + match->value._ekey = 0; \ + } \ + rc; \ +}) +#define COPY_FROM_ENC(_key) COPY_MASK_AND_VALUE(_key, enc_##_key) + if (match->mask.ip_proto) + return -EOPNOTSUPP; + match->mask.ip_proto = ~0; + match->value.ip_proto = IPPROTO_UDP; + if (COPY_FROM_ENC(src_ip) || COPY_FROM_ENC(dst_ip)) + return rc; +#ifdef CONFIG_IPV6 + if (!ipv6_addr_any(&match->mask.src_ip6)) + return -EOPNOTSUPP; + match->mask.src_ip6 = match->mask.enc_src_ip6; + memset(&match->mask.enc_src_ip6, 0, sizeof(struct in6_addr)); + if (!ipv6_addr_any(&match->mask.dst_ip6)) + return -EOPNOTSUPP; + match->mask.dst_ip6 = match->mask.enc_dst_ip6; + memset(&match->mask.enc_dst_ip6, 0, sizeof(struct in6_addr)); +#endif + if (COPY_FROM_ENC(ip_tos) || COPY_FROM_ENC(ip_ttl)) + return rc; + /* should really copy enc_ip_frag but we don't have that in + * parse_match yet + */ + if (COPY_MASK_AND_VALUE(l4_sport, enc_sport) || + COPY_MASK_AND_VALUE(l4_dport, enc_dport)) + return rc; + return 0; +#undef COPY_FROM_ENC +#undef COPY_MASK_AND_VALUE +} + +/* If a foreign LHS rule wants to match on keys that are only available after + * encap header identification and parsing, then it can't be done in the Outer + * Rule lookup, because that lookup determines the encap type used to parse + * beyond the outer headers. Thus, such rules must use the OR-AR-CT-AR lookup + * sequence, with an EM (struct efx_tc_encap_match) in the OR step. + * Return true iff the passed match requires this. 
+ */ +static bool efx_tc_flower_flhs_needs_ar(struct efx_tc_match *match) +{ + /* matches on inner-header keys can't be done in OR */ + return match->mask.eth_proto || + match->mask.vlan_tci[0] || match->mask.vlan_tci[1] || + match->mask.vlan_proto[0] || match->mask.vlan_proto[1] || + memchr_inv(match->mask.eth_saddr, 0, ETH_ALEN) || + memchr_inv(match->mask.eth_daddr, 0, ETH_ALEN) || + match->mask.ip_proto || + match->mask.ip_tos || match->mask.ip_ttl || + match->mask.src_ip || match->mask.dst_ip || +#ifdef CONFIG_IPV6 + !ipv6_addr_any(&match->mask.src_ip6) || + !ipv6_addr_any(&match->mask.dst_ip6) || +#endif + match->mask.ip_frag || match->mask.ip_firstfrag || + match->mask.l4_sport || match->mask.l4_dport || + match->mask.tcp_flags || + /* nor can VNI */ + match->mask.enc_keyid; +} + static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, struct flow_cls_offload *tc, struct flow_rule *fr, @@ -882,9 +978,12 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, struct netlink_ext_ack *extack = tc->common.extack; struct efx_tc_lhs_action *act = &rule->lhs_act; const struct flow_action_entry *fa; + enum efx_tc_counter_type ctype; bool pipe = true; int i; + ctype = rule->is_ar ? EFX_TC_COUNTER_TYPE_AR : EFX_TC_COUNTER_TYPE_OR; + flow_action_for_each(i, fa, &fr->action) { struct efx_tc_ct_zone *ct_zone; struct efx_tc_recirc_id *rid; @@ -917,7 +1016,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, return -EOPNOTSUPP; } cnt = efx_tc_flower_get_counter_index(efx, tc->cookie, - EFX_TC_COUNTER_TYPE_OR); + ctype); if (IS_ERR(cnt)) { NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter"); return PTR_ERR(cnt); @@ -1354,6 +1453,222 @@ static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung, return 0; } +static int efx_tc_flower_replace_foreign_lhs_ar(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + int rc; + + type = efx_tc_indr_netdev_type(net_dev); + if (type == EFX_ENCAP_TYPE_NONE) { + NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Firmware reports no support for %s encap match", + efx_tc_encap_type_name(type)); + return rc; + } + /* This is an Action Rule, so it needs a separate Encap Match in the + * Outer Rule table. Insert that now. + */ + rc = efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_DIRECT, 0, 0, extack); + if (rc) + return rc; + + match->mask.recirc_id = 0xff; + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + /* We must inhibit match on TCP SYN/FIN/RST, so that SW can see + * the packet and update the conntrack table. + * Outer Rules will do that with CT_TCP_FLAGS_INHIBIT, but Action + * Rules don't have that; instead they support matching on + * TCP_SYN_FIN_RST (aka TCP_INTERESTING_FLAGS), so use that. + * This is only strictly needed if there will be a DO_CT action, + * which we don't know yet, but typically there will be and it's + * simpler not to bother checking here. 
+ */ + match->mask.tcp_syn_fin_rst = true; + + rc = efx_mae_match_check_caps(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + rule->is_ar = true; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + +static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + int rc; + + if (tc->common.chain_index) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0"); + return -EOPNOTSUPP; + } + + if (!efx_tc_match_is_encap(&match->mask)) { + /* This is not a tunnel decap rule, ignore it */ + netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign LHS filter without encap match\n"); + return -EOPNOTSUPP; + } + + if (efx_tc_flower_flhs_needs_ar(match)) + return efx_tc_flower_replace_foreign_lhs_ar(efx, tc, fr, match, + net_dev); + + type = efx_tc_indr_netdev_type(net_dev); + if (type == EFX_ENCAP_TYPE_NONE) { + NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Firmware reports no support for %s encap match", + efx_tc_encap_type_name(type)); + return rc; + } + /* Reserve the outer tuple with a pseudo Encap Match */ + rc = efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_PSEUDO_OR, 0, 0, + extack); + if (rc) + return rc; + + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + + rc = efx_tc_flower_translate_flhs_match(match); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule cannot match on inner fields"); + goto release_encap_match; + } + + rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if 
(old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + static int efx_tc_flower_replace_foreign(struct efx_nic *efx, struct net_device *net_dev, struct flow_cls_offload *tc) @@ -1371,7 +1686,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, /* Parse match */ memset(&match, 0, sizeof(match)); - rc = efx_tc_flower_parse_match(efx, fr, &match, NULL); + rc = efx_tc_flower_parse_match(efx, fr, &match, extack); if (rc) return rc; /* The rule as given to us doesn't specify a source netdevice. @@ -1387,6 +1702,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, match.value.ingress_port = rc; match.mask.ingress_port = ~0; + if (efx_tc_rule_is_lhs_rule(fr, &match)) + return efx_tc_flower_replace_foreign_lhs(efx, tc, fr, &match, + net_dev); + if (tc->common.chain_index) { struct efx_tc_recirc_id *rid; @@ -1416,6 +1735,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (match.mask.ct_state_est && !match.value.ct_state_est) { if (match.value.tcp_syn_fin_rst) { /* Can't offload this combination */ + NL_SET_ERR_MSG_MOD(extack, "TCP flags and -est conflict for offload"); rc = -EOPNOTSUPP; goto release; } @@ -1442,7 +1762,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, goto release; } - rc = efx_mae_match_check_caps(efx, &match.mask, NULL); + rc = efx_mae_match_check_caps(efx, &match.mask, extack); if (rc) goto release; @@ -1470,7 +1790,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, extack); if (rc) goto release; - } else { + } else if (!tc->common.chain_index) { /* This is not a tunnel decap rule, ignore it */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter without encap match\n"); @@ -1530,6 +1850,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, goto release; } if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) { + NL_SET_ERR_MSG_MOD(extack, "Count action violates action order (can't happen)"); rc = -EOPNOTSUPP; goto release; } @@ -2136,6 +2457,14 @@ static int efx_tc_flower_replace(struct efx_nic *efx, NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device"); rc = -EOPNOTSUPP; goto release; + case FLOW_ACTION_CT: + if (fa->ct.action != TCA_CT_ACT_NAT) { + rc = -EOPNOTSUPP; + NL_SET_ERR_MSG_FMT_MOD(extack, "Can only offload CT 'nat' action in RHS rules, not %d", fa->ct.action); + goto release; + } + act->do_nat = 1; + break; default: NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", fa->id); diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h index 4dd2c378fd..7b5190078b 100644 --- a/drivers/net/ethernet/sfc/tc.h +++ b/drivers/net/ethernet/sfc/tc.h @@ 
-48,6 +48,7 @@ struct efx_tc_encap_action; /* see tc_encap_actions.h */ * @vlan_push: the number of vlan headers to push * @vlan_pop: the number of vlan headers to pop * @decap: used to indicate a tunnel header decapsulation should take place + * @do_nat: perform NAT/NPT with values returned by conntrack match * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented * @deliver: used to indicate a deliver action should take place * @vlan_tci: tci fields for vlan push actions @@ -68,6 +69,7 @@ struct efx_tc_action_set { u16 vlan_push:2; u16 vlan_pop:2; u16 decap:1; + u16 do_nat:1; u16 do_ttl_dec:1; u16 deliver:1; __be16 vlan_tci[2]; @@ -140,10 +142,14 @@ static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask) * The pseudo encap match may be referenced again by an encap match * with different values for these fields, but all masks must match the * first (stored in our child_* fields). + * @EFX_TC_EM_PSEUDO_OR: registered by an fLHS rule that fits in the OR + * table. The &struct efx_tc_lhs_rule already holds the HW OR entry. + * Only one reference to this encap match may exist. */ enum efx_tc_em_pseudo_type { EFX_TC_EM_DIRECT, EFX_TC_EM_PSEUDO_MASK, + EFX_TC_EM_PSEUDO_OR, }; struct efx_tc_encap_match { @@ -183,6 +189,7 @@ struct efx_tc_action_set_list { }; struct efx_tc_lhs_action { + enum efx_encap_type tun_type; struct efx_tc_recirc_id *rid; struct efx_tc_ct_zone *zone; struct efx_tc_counter_index *count; @@ -203,6 +210,7 @@ struct efx_tc_lhs_rule { struct efx_tc_lhs_action lhs_act; struct rhash_head linkage; u32 fw_id; + bool is_ar; /* Action Rule (for OR-AR-CT-AR sequence) */ }; enum efx_tc_rule_prios { diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c index 44bb576703..d90206f271 100644 --- a/drivers/net/ethernet/sfc/tc_conntrack.c +++ b/drivers/net/ethernet/sfc/tc_conntrack.c @@ -276,10 +276,84 @@ static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr, return 0; } +/** + * struct efx_tc_ct_mangler_state - tracks which fields have been pedited + * + * @ipv4: IP source or destination addr has been set + * @tcpudp: TCP/UDP source or destination port has been set + */ +struct efx_tc_ct_mangler_state { + u8 ipv4:1; + u8 tcpudp:1; +}; + +static int efx_tc_ct_mangle(struct efx_nic *efx, struct efx_tc_ct_entry *conn, + const struct flow_action_entry *fa, + struct efx_tc_ct_mangler_state *mung) +{ + /* Is this the first mangle we've processed for this rule? */ + bool first = !(mung->ipv4 || mung->tcpudp); + bool dnat = false; + + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, daddr): + dnat = true; + fallthrough; + case offsetof(struct iphdr, saddr): + if (fa->mangle.mask) + return -EOPNOTSUPP; + conn->nat_ip = htonl(fa->mangle.val); + mung->ipv4 = 1; + break; + default: + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. 
+ */ + switch (fa->mangle.offset) { + case offsetof(struct tcphdr, dest): + BUILD_BUG_ON(offsetof(struct tcphdr, dest) != + offsetof(struct udphdr, dest)); + dnat = true; + fallthrough; + case offsetof(struct tcphdr, source): + BUILD_BUG_ON(offsetof(struct tcphdr, source) != + offsetof(struct udphdr, source)); + if (~fa->mangle.mask != 0xffff) + return -EOPNOTSUPP; + conn->l4_natport = htons(fa->mangle.val); + mung->tcpudp = 1; + break; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + /* first mangle tells us whether this is SNAT or DNAT; + * subsequent mangles must match that + */ + if (first) + conn->dnat = dnat; + else if (conn->dnat != dnat) + return -EOPNOTSUPP; + return 0; +} + static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, struct flow_cls_offload *tc) { struct flow_rule *fr = flow_cls_offload_flow_rule(tc); + struct efx_tc_ct_mangler_state mung = {}; struct efx_tc_ct_entry *conn, *old; struct efx_nic *efx = ct_zone->efx; const struct flow_action_entry *fa; @@ -326,6 +400,17 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, goto release; } break; + case FLOW_ACTION_MANGLE: + if (conn->eth_proto != htons(ETH_P_IP)) { + netif_dbg(efx, drv, efx->net_dev, + "NAT only supported for IPv4\n"); + rc = -EOPNOTSUPP; + goto release; + } + rc = efx_tc_ct_mangle(efx, conn, fa, &mung); + if (rc) + goto release; + break; default: netif_dbg(efx, drv, efx->net_dev, "Unhandled action %u for conntrack\n", fa->id); @@ -335,8 +420,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, } /* fill in defaults for unmangled values */ - conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip; - conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport; + if (!mung.ipv4) + conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip; + if (!mung.tcpudp) + conn->l4_natport = conn->dnat ? 
conn->l4_dport : conn->l4_sport; cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT); if (IS_ERR(cnt)) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 8fc3f5272f..98d0b561a0 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -962,7 +962,7 @@ out_free: return err; } -static int ioc3eth_remove(struct platform_device *pdev) +static void ioc3eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ioc3_private *ip = netdev_priv(dev); @@ -973,8 +973,6 @@ static int ioc3eth_remove(struct platform_device *pdev) unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); free_netdev(dev); - - return 0; } @@ -1275,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev) static struct platform_driver ioc3eth_driver = { .probe = ioc3eth_probe, - .remove = ioc3eth_remove, + .remove_new = ioc3eth_remove, .driver = { .name = "ioc3-eth", } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 6d850ea2b9..18b6f93d87 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -854,19 +854,17 @@ static int meth_probe(struct platform_device *pdev) return 0; } -static int meth_remove(struct platform_device *pdev) +static void meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_netdev(dev); - - return 0; } static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = meth_remove, + .remove_new = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 032eccf8eb..7583476165 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2411,7 +2411,7 @@ static int smc_drv_probe(struct platform_device *pdev) return ret; } -static int smc_drv_remove(struct platform_device *pdev) +static void smc_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct smc_local *lp = netdev_priv(ndev); @@ -2436,8 +2436,6 @@ static int smc_drv_remove(struct platform_device *pdev) release_mem_region(res->start, SMC_IO_EXTENT); free_netdev(ndev); - - return 0; } static int smc_drv_suspend(struct device *dev) @@ -2480,7 +2478,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = { static struct platform_driver smc_driver = { .probe = smc_drv_probe, - .remove = smc_drv_remove, + .remove_new = smc_drv_remove, .driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index c521ea8f94..46eee747c6 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -114,25 +114,6 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg, (lp)->cfg.pxa_u16_align4) -#elif defined(CONFIG_SH_SH4202_MICRODEV) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 - -#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) -#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) -#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000) -#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) -#define SMC_outw(lp, v, a, r) outw(v, (a) + (r) - 0xa0000000) -#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000) -#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) -#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) -#define 
SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) -#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) - -#define SMC_IRQ_FLAGS (0) - #elif defined(CONFIG_ATARI) #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index cb590db625..31cb7d0166 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2314,7 +2314,7 @@ static int smsc911x_init(struct net_device *dev) return 0; } -static int smsc911x_drv_remove(struct platform_device *pdev) +static void smsc911x_drv_remove(struct platform_device *pdev) { struct net_device *dev; struct smsc911x_data *pdata; @@ -2348,8 +2348,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev) free_netdev(dev); pm_runtime_disable(&pdev->dev); - - return 0; } /* standard register acces */ @@ -2668,7 +2666,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, - .remove = smsc911x_drv_remove, + .remove_new = smsc911x_drv_remove, .driver = { .name = SMSC_CHIPNAME, .pm = SMSC911X_PM_OPS, diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index f358ea0031..0891e9e49e 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, u16 pkts) { if (xdp_res & NETSEC_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & NETSEC_XDP_TX) netsec_xdp_ring_tx_db(priv, pkts); @@ -2150,7 +2150,7 @@ free_ndev: return ret; } -static int netsec_remove(struct platform_device *pdev) +static void netsec_remove(struct platform_device *pdev) { struct netsec_priv *priv = platform_get_drvdata(pdev); @@ -2162,8 +2162,6 @@ static int netsec_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); free_netdev(priv->ndev); - - return 0; } #ifdef CONFIG_PM @@ -2211,7 +2209,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); static struct platform_driver netsec_driver = { .probe = netsec_probe, - .remove = netsec_remove, + .remove_new = netsec_remove, .driver = { .name = "netsec", .pm = &netsec_pm_ops, diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 4838d2383a..eed24e67c5 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1719,7 +1719,7 @@ out_del_napi: return ret; } -static int ave_remove(struct platform_device *pdev) +static void ave_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ave_private *priv = netdev_priv(ndev); @@ -1727,8 +1727,6 @@ static int ave_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi_rx); netif_napi_del(&priv->napi_tx); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -1976,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match); static struct platform_driver ave_driver = { .probe = ave_probe, - .remove = ave_remove, + .remove_new = ave_remove, .driver = { .name = "ave", .pm = AVE_PM_OPS, diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 25f2d42de4..85dcda51df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -239,6 +239,17 @@ config DWMAC_INTEL_PLAT the stmmac device driver. This driver is used for the Intel Keem Bay SoC. 
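
The xdp_do_flush_map() to xdp_do_flush() changes in the sfc and netsec hunks are a pure rename; the call still flushes any frames queued by XDP_REDIRECT before the poll loop completes. A rough sketch of where the call sits in a NAPI poll handler (the mydrv_* helpers are hypothetical placeholders, not code from this patch):

#include <linux/filter.h>	/* xdp_do_flush() */
#include <linux/netdevice.h>

/* Placeholder for the driver's real RX routine, which may call xdp_do_redirect(). */
static int mydrv_process_rx(struct napi_struct *napi, int budget)
{
	return 0;
}

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = mydrv_process_rx(napi, budget);

	/* Flush frames queued by XDP_REDIRECT before the poll completes */
	xdp_do_flush();

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
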
+config DWMAC_LOONGSON1 + tristate "Loongson1 GMAC support" + default MACH_LOONGSON32 + depends on OF && (MACH_LOONGSON32 || COMPILE_TEST) + help + Support for ethernet controller on Loongson1 SoC. + + This selects Loongson1 SoC glue layer support for the stmmac + device driver. This driver is used for Loongson1-based boards + like Loongson LS1B/LS1C. + config DWMAC_TEGRA tristate "NVIDIA Tegra MGBE support" depends on ARCH_TEGRA || COMPILE_TEST diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 5b57aee192..80e598bd42 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o +obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1e996c2904..c75c64a938 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -59,28 +59,51 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_q_tx_stats { + u64_stats_t tx_bytes; + u64_stats_t tx_set_ic_bit; + u64_stats_t tx_tso_frames; + u64_stats_t tx_tso_nfrags; +}; + +struct stmmac_napi_tx_stats { + u64_stats_t tx_packets; + u64_stats_t tx_pkt_n; + u64_stats_t poll; + u64_stats_t tx_clean; + u64_stats_t tx_set_ic_bit; +}; + struct stmmac_txq_stats { - u64 tx_bytes; - u64 tx_packets; - u64 tx_pkt_n; - u64 tx_normal_irq_n; - u64 napi_poll; - u64 tx_clean; - u64 tx_set_ic_bit; - u64 tx_tso_frames; - u64 tx_tso_nfrags; - struct u64_stats_sync syncp; + /* Updates protected by tx queue lock. */ + struct u64_stats_sync q_syncp; + struct stmmac_q_tx_stats q; + + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_tx_stats napi; } ____cacheline_aligned_in_smp; +struct stmmac_napi_rx_stats { + u64_stats_t rx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_pkt_n; + u64_stats_t poll; +}; + struct stmmac_rxq_stats { - u64 rx_bytes; - u64 rx_packets; - u64 rx_pkt_n; - u64 rx_normal_irq_n; - u64 napi_poll; - struct u64_stats_sync syncp; + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_rx_stats napi; } ____cacheline_aligned_in_smp; +/* Updates on each CPU protected by not allowing nested irqs. 
*/ +struct stmmac_pcpu_stats { + struct u64_stats_sync syncp; + u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES]; + u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES]; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -205,6 +228,7 @@ struct stmmac_extra_stats { /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; + struct stmmac_pcpu_stats __percpu *pcpu_stats; unsigned long rx_dropped; unsigned long rx_errors; unsigned long tx_dropped; @@ -216,6 +240,7 @@ struct stmmac_safety_stats { unsigned long mac_errors[32]; unsigned long mtl_errors[32]; unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; }; /* Number of fields in Safety Stats */ @@ -293,7 +318,7 @@ struct stmmac_safety_stats { #define MIN_DMA_RIWT 0x10 #define DEF_DMA_RIWT 0xa0 /* Tx coalesce parameters */ -#define STMMAC_COAL_TX_TIMER 1000 +#define STMMAC_COAL_TX_TIMER 5000 #define STMMAC_MAX_COAL_TX_TICK 100000 #define STMMAC_TX_MAX_FRAMES 256 #define STMMAC_TX_FRAMES 25 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 58a7f08e8d..643ee6d8d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (IS_ERR(gmac)) return PTR_ERR(gmac); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -124,13 +124,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) anarion_gmac_init(pdev, gmac); plat_dat->bsp_priv = gmac; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) { - stmmac_remove_config_dt(pdev, plat_dat); - return ret; - } - - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id anarion_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 61ebf36da1..ec924c6c76 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -435,15 +435,14 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); ret = data->probe(pdev, plat_dat, &stmmac_res); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); - - goto remove_config; + return ret; } ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); @@ -458,25 +457,17 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) remove: data->remove(pdev); -remove_config: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } static void dwc_eth_dwmac_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - const struct dwc_eth_dwmac_data *data; - - data = device_get_match_data(&pdev->dev); + const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); stmmac_dvr_remove(&pdev->dev); data->remove(pdev); - - stmmac_remove_config_dt(pdev, priv->plat); } static const struct of_device_id 
dwc_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index 20fc455b33..598eff9268 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) return ret; if (pdev->dev.of_node) { - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); @@ -46,17 +46,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) plat_dat->unicast_filter_entries = 1; } - ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - if (pdev->dev.of_node) - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct of_device_id dwmac_generic_match[] = { @@ -77,7 +67,6 @@ MODULE_DEVICE_TABLE(of, dwmac_generic_match); static struct platform_driver dwmac_generic_driver = { .probe = dwmac_generic_probe, - .remove_new = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index df34e34cc1..8f730ada71 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -331,15 +331,14 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); data = of_device_get_match_data(&pdev->dev); if (!data) { dev_err(&pdev->dev, "failed to get match data\n"); - ret = -EINVAL; - goto err_match_data; + return -EINVAL; } dwmac->ops = data; @@ -348,7 +347,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) ret = imx_dwmac_parse_dt(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "failed to parse OF data\n"); - goto err_parse_dt; + return ret; } if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) @@ -365,7 +364,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) ret = imx_dwmac_clks_config(dwmac, true); if (ret) - goto err_clks_config; + return ret; ret = imx_dwmac_init(pdev, dwmac); if (ret) @@ -385,10 +384,6 @@ err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); err_dwmac_init: imx_dwmac_clks_config(dwmac, false); -err_clks_config: -err_parse_dt: -err_match_data: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 0a20c3d247..19c93b998f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -241,29 +241,25 @@ static int ingenic_mac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL); - if (!mac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!mac) + return -ENOMEM; data = of_device_get_match_data(&pdev->dev); if (!data) { 
dev_err(&pdev->dev, "No of match data provided\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } /* Get MAC PHY control register */ mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg"); if (IS_ERR(mac->regmap)) { dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__); - ret = PTR_ERR(mac->regmap); - goto err_remove_config_dt; + return PTR_ERR(mac->regmap); } if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) { @@ -272,8 +268,7 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->tx_delay = tx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } @@ -283,8 +278,7 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->rx_delay = rx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } @@ -295,18 +289,9 @@ static int ingenic_mac_probe(struct platform_device *pdev) ret = ingenic_mac_init(plat_dat); if (ret) - goto err_remove_config_dt; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); + return ret; - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index d352a14f9d..d68f0c4e78 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -7,8 +7,8 @@ #include #include #include -#include #include +#include #include #include "dwmac4.h" @@ -76,7 +76,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - const struct of_device_id *match; struct intel_dwmac *dwmac; unsigned long rate; int ret; @@ -85,35 +84,29 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); } dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->dev = &pdev->dev; dwmac->tx_clk = NULL; - match = of_match_device(intel_eth_plat_match, &pdev->dev); - if (match && match->data) { - dwmac->data = (const struct intel_dwmac_data *)match->data; - + dwmac->data = device_get_match_data(&pdev->dev); + if (dwmac->data) { if (dwmac->data->fix_mac_speed) plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed; /* Enable TX clock */ if (dwmac->data->tx_clk_en) { dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); - if (IS_ERR(dwmac->tx_clk)) { - ret = PTR_ERR(dwmac->tx_clk); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->tx_clk)) + return PTR_ERR(dwmac->tx_clk); clk_prepare_enable(dwmac->tx_clk); @@ -126,7 +119,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set tx_clk\n"); - goto err_remove_config_dt; + return ret; } } } @@ -140,7 +133,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, 
"Failed to set clk_ptp_ref\n"); - goto err_remove_config_dt; + return ret; } } } @@ -158,15 +151,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) { clk_disable_unprepare(dwmac->tx_clk); - goto err_remove_config_dt; + return ret; } return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; } static void intel_eth_plat_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index a3a249c635..60283543ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -605,7 +605,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; plat->int_snapshot_num = AUX_SNAPSHOT1; - plat->ext_snapshot_num = AUX_SNAPSHOT0; plat->crosststamp = intel_crosststamp; plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 9b02007491..281687d708 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -384,22 +384,20 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (val) return val; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) { - err = -ENOMEM; - goto err_remove_config_dt; - } + if (!gmac) + return -ENOMEM; gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); if (err) { dev_err(dev, "device tree parsing error\n"); - goto err_remove_config_dt; + return err; } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, @@ -459,11 +457,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { err = ipq806x_gmac_configure_qsgmii_params(gmac); if (err) - goto err_remove_config_dt; + return err; err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac); if (err) - goto err_remove_config_dt; + return err; } plat_dat->has_gmac = true; @@ -473,21 +471,12 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->tx_fifo_size = 8192; plat_dat->rx_fifo_size = 8192; - err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (err) - goto err_remove_config_dt; - - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); err_unsupported_phy: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - err = -EINVAL; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return err; + return -EINVAL; } static const struct of_device_id ipq806x_gmac_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c new file mode 100644 index 0000000000..3e86810717 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Loongson-1 DWMAC glue layer + * + * Copyright (C) 2011-2023 Keguang Zhang + */ + +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define LS1B_GMAC0_BASE (0x1fe10000) +#define LS1B_GMAC1_BASE (0x1fe20000) + +/* Loongson-1 SYSCON Registers */ 
+#define LS1X_SYSCON0 (0x0) +#define LS1X_SYSCON1 (0x4) + +/* Loongson-1B SYSCON Register Bits */ +#define GMAC1_USE_UART1 BIT(4) +#define GMAC1_USE_UART0 BIT(3) + +#define GMAC1_SHUT BIT(13) +#define GMAC0_SHUT BIT(12) + +#define GMAC1_USE_TXCLK BIT(3) +#define GMAC0_USE_TXCLK BIT(2) +#define GMAC1_USE_PWM23 BIT(1) +#define GMAC0_USE_PWM01 BIT(0) + +/* Loongson-1C SYSCON Register Bits */ +#define GMAC_SHUT BIT(6) + +#define PHY_INTF_SELI GENMASK(30, 28) +#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0) +#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4) + +struct ls1x_dwmac { + struct plat_stmmacenet_data *plat_dat; + struct regmap *regmap; +}; + +static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) +{ + struct ls1x_dwmac *dwmac = priv; + struct plat_stmmacenet_data *plat = dwmac->plat_dat; + struct regmap *regmap = dwmac->regmap; + struct resource *res; + unsigned long reg_base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Could not get IO_MEM resources\n"); + return -EINVAL; + } + reg_base = (unsigned long)res->start; + + if (reg_base == LS1B_GMAC0_BASE) { + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01, + 0); + break; + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); + } else if (reg_base == LS1B_GMAC1_BASE) { + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC1_USE_UART1 | GMAC1_USE_UART0, + GMAC1_USE_UART1 | GMAC1_USE_UART0); + + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + regmap_update_bits(regmap, LS1X_SYSCON1, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23, + 0); + + break; + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON1, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0); + } else { + dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx", + reg_base); + return -EINVAL; + } + + return 0; +} + +static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv) +{ + struct ls1x_dwmac *dwmac = priv; + struct plat_stmmacenet_data *plat = dwmac->plat_dat; + struct regmap *regmap = dwmac->regmap; + + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, + PHY_INTF_MII); + break; + case PHY_INTERFACE_MODE_RMII: + regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, + PHY_INTF_RMII); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY-mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); + + return 0; +} + +static int ls1x_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct regmap *regmap; + struct ls1x_dwmac *dwmac; + int (*init)(struct platform_device *pdev, void *priv); + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + /* Probe syscon */ + regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + 
"loongson,ls1-syscon"); + if (IS_ERR(regmap)) + return dev_err_probe(&pdev->dev, PTR_ERR(regmap), + "Unable to find syscon\n"); + + init = of_device_get_match_data(&pdev->dev); + if (!init) { + dev_err(&pdev->dev, "No of match data provided\n"); + return -EINVAL; + } + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat), + "dt configuration failed\n"); + + plat_dat->bsp_priv = dwmac; + plat_dat->init = init; + dwmac->plat_dat = plat_dat; + dwmac->regmap = regmap; + + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); +} + +static const struct of_device_id ls1x_dwmac_match[] = { + { + .compatible = "loongson,ls1b-gmac", + .data = &ls1b_dwmac_syscon_init, + }, + { + .compatible = "loongson,ls1c-emac", + .data = &ls1c_dwmac_syscon_init, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ls1x_dwmac_match); + +static struct platform_driver ls1x_dwmac_driver = { + .probe = ls1x_dwmac_probe, + .driver = { + .name = "loongson1-dwmac", + .of_match_table = ls1x_dwmac_match, + }, +}; +module_platform_driver(ls1x_dwmac_driver); + +MODULE_AUTHOR("Keguang Zhang "); +MODULE_DESCRIPTION("Loongson-1 DWMAC glue layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index d0aa674ce7..4c810d8f5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -46,8 +46,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); if (IS_ERR(reg)) { dev_err(&pdev->dev, "syscon lookup failed\n"); - ret = PTR_ERR(reg); - goto err_remove_config_dt; + return PTR_ERR(reg); } if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) { @@ -56,23 +55,13 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id lpc18xx_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index cd796ec041..2a9132d6d7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -656,7 +656,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -665,7 +665,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) ret = mediatek_dwmac_clks_config(priv_plat, 
true); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -675,8 +675,6 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) err_drv_probe: mediatek_dwmac_clks_config(priv_plat, false); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 959f88c6da..a16bfa9089 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -52,35 +52,22 @@ static int meson6_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->reg = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(dwmac->reg)) { - ret = PTR_ERR(dwmac->reg); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->reg)) + return PTR_ERR(dwmac->reg); plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id meson6_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 0b159dc0d5..b23944aa34 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -400,33 +400,27 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->data = (const struct meson8b_dwmac_data *) of_device_get_match_data(&pdev->dev); - if (!dwmac->data) { - ret = -EINVAL; - goto err_remove_config_dt; - } + if (!dwmac->data) + return -EINVAL; dwmac->regs = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(dwmac->regs)) { - ret = PTR_ERR(dwmac->regs); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->regs)) + return PTR_ERR(dwmac->regs); dwmac->dev = &pdev->dev; ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode); if (ret) { dev_err(&pdev->dev, "missing phy-mode property\n"); - goto err_remove_config_dt; + return ret; } /* use 2ns as fallback since this value was previously hardcoded */ @@ -448,53 +442,40 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) { dev_err(dwmac->dev, "The RGMII RX delay range is 0..3000ps in 200ps steps"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } else { if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) { dev_err(dwmac->dev, "The only allowed RGMII RX delays values are: 0ps, 2000ps"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } dwmac->timing_adj_clk = 
devm_clk_get_optional(dwmac->dev, "timing-adjustment"); - if (IS_ERR(dwmac->timing_adj_clk)) { - ret = PTR_ERR(dwmac->timing_adj_clk); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->timing_adj_clk)) + return PTR_ERR(dwmac->timing_adj_clk); ret = meson8b_init_rgmii_delays(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = meson8b_init_rgmii_tx_clk(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = dwmac->data->set_phy_mode(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = meson8b_init_prg_eth(dwmac); if (ret) - goto err_remove_config_dt; + return ret; plat_dat->bsp_priv = dwmac; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct meson8b_dwmac_data meson8b_dwmac_data = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index d920a50dd1..382e8de125 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1824,7 +1824,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -1836,18 +1836,16 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = rk_fix_speed; plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); - if (IS_ERR(plat_dat->bsp_priv)) { - ret = PTR_ERR(plat_dat->bsp_priv); - goto err_remove_config_dt; - } + if (IS_ERR(plat_dat->bsp_priv)) + return PTR_ERR(plat_dat->bsp_priv); ret = rk_gmac_clk_init(plat_dat); if (ret) - goto err_remove_config_dt; + return ret; ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -1857,8 +1855,6 @@ static int rk_gmac_probe(struct platform_device *pdev) err_gmac_powerdown: rk_gmac_powerdown(plat_dat->bsp_priv); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 9bf102bbc6..ba2ce776bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -400,21 +400,19 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp"); if (IS_ERR(dwmac->stmmac_ocp_rst)) { ret = PTR_ERR(dwmac->stmmac_ocp_rst); dev_err(dev, "error getting reset control of ocp %d\n", ret); - goto err_remove_config_dt; + return ret; } reset_control_deassert(dwmac->stmmac_ocp_rst); @@ -422,7 +420,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) ret = socfpga_dwmac_parse_data(dwmac, dev); if (ret) { dev_err(dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + return ret; } dwmac->ops = ops; @@ 
-431,7 +429,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_remove_config_dt; + return ret; ndev = platform_get_drvdata(pdev); stpriv = netdev_priv(ndev); @@ -492,8 +490,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) err_dvr_remove: stmmac_dvr_remove(&pdev->dev); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 9289bb87c3..5d630affb4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -105,7 +105,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, err, "failed to get resources\n"); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat), "dt configuration failed\n"); @@ -141,13 +141,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev) if (err) return err; - err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (err) { - stmmac_remove_config_dt(pdev, plat_dat); - return err; - } - - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id starfive_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 0d653bbb93..4445cddc4c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -273,20 +273,18 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; ret = sti_dwmac_parse_data(dwmac, pdev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + return ret; } dwmac->fix_retime_src = data->fix_retime_src; @@ -296,7 +294,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) ret = clk_prepare_enable(dwmac->clk); if (ret) - goto err_remove_config_dt; + return ret; ret = sti_dwmac_set_mode(dwmac); if (ret) @@ -310,8 +308,6 @@ static int sti_dwmac_probe(struct platform_device *pdev) disable_clk: clk_disable_unprepare(dwmac->clk); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index a0e276783e..c92dfc4ecf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -98,7 +98,6 @@ struct stm32_dwmac { struct stm32_ops { int (*set_mode)(struct plat_stmmacenet_data *plat_dat); - int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare); int (*suspend)(struct stm32_dwmac *dwmac); void (*resume)(struct stm32_dwmac *dwmac); int (*parse_data)(struct stm32_dwmac *dwmac, @@ -107,62 +106,55 @@ struct stm32_ops { bool clk_rx_enable_in_suspend; }; -static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) +static int stm32_dwmac_clk_enable(struct stm32_dwmac 
*dwmac, bool resume) { - struct stm32_dwmac *dwmac = plat_dat->bsp_priv; int ret; - if (dwmac->ops->set_mode) { - ret = dwmac->ops->set_mode(plat_dat); - if (ret) - return ret; - } - ret = clk_prepare_enable(dwmac->clk_tx); if (ret) - return ret; + goto err_clk_tx; - if (!dwmac->ops->clk_rx_enable_in_suspend || - !dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) { ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) { - clk_disable_unprepare(dwmac->clk_tx); - return ret; - } + if (ret) + goto err_clk_rx; } - if (dwmac->ops->clk_prepare) { - ret = dwmac->ops->clk_prepare(dwmac, true); - if (ret) { - clk_disable_unprepare(dwmac->clk_rx); - clk_disable_unprepare(dwmac->clk_tx); - } + ret = clk_prepare_enable(dwmac->syscfg_clk); + if (ret) + goto err_syscfg_clk; + + if (dwmac->enable_eth_ck) { + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) + goto err_clk_eth_ck; } return ret; + +err_clk_eth_ck: + clk_disable_unprepare(dwmac->syscfg_clk); +err_syscfg_clk: + if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) + clk_disable_unprepare(dwmac->clk_rx); +err_clk_rx: + clk_disable_unprepare(dwmac->clk_tx); +err_clk_tx: + return ret; } -static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) +static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) { - int ret = 0; + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + int ret; - if (prepare) { - ret = clk_prepare_enable(dwmac->syscfg_clk); + if (dwmac->ops->set_mode) { + ret = dwmac->ops->set_mode(plat_dat); if (ret) return ret; - if (dwmac->enable_eth_ck) { - ret = clk_prepare_enable(dwmac->clk_eth_ck); - if (ret) { - clk_disable_unprepare(dwmac->syscfg_clk); - return ret; - } - } - } else { - clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->enable_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); } - return ret; + + return stm32_dwmac_clk_enable(dwmac, resume); } static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) @@ -252,13 +244,15 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) dwmac->ops->syscfg_eth_mask, val << 23); } -static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) +static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) { clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_rx); + if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend) + clk_disable_unprepare(dwmac->clk_rx); - if (dwmac->ops->clk_prepare) - dwmac->ops->clk_prepare(dwmac, false); + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->enable_eth_ck) + clk_disable_unprepare(dwmac->clk_eth_ck); } static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, @@ -372,21 +366,18 @@ static int stm32_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; data = of_device_get_match_data(&pdev->dev); if (!data) { dev_err(&pdev->dev, "no of match data provided\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } dwmac->ops = data; @@ -395,14 +386,14 @@ static int stm32_dwmac_probe(struct platform_device *pdev) ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + 
return ret; } plat_dat->bsp_priv = dwmac; - ret = stm32_dwmac_init(plat_dat); + ret = stm32_dwmac_init(plat_dat, false); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -411,9 +402,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) return 0; err_clk_disable: - stm32_dwmac_clk_disable(dwmac); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); + stm32_dwmac_clk_disable(dwmac, false); return ret; } @@ -426,7 +415,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); - stm32_dwmac_clk_disable(priv->plat->bsp_priv); + stm32_dwmac_clk_disable(dwmac, false); if (dwmac->irq_pwr_wakeup >= 0) { dev_pm_clear_wake_irq(&pdev->dev); @@ -436,18 +425,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev) static int stm32mp1_suspend(struct stm32_dwmac *dwmac) { - int ret = 0; - - ret = clk_prepare_enable(dwmac->clk_ethstp); - if (ret) - return ret; - - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->enable_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); - - return ret; + return clk_prepare_enable(dwmac->clk_ethstp); } static void stm32mp1_resume(struct stm32_dwmac *dwmac) @@ -455,14 +433,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac) clk_disable_unprepare(dwmac->clk_ethstp); } -static int stm32mcu_suspend(struct stm32_dwmac *dwmac) -{ - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_rx); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int stm32_dwmac_suspend(struct device *dev) { @@ -473,6 +443,10 @@ static int stm32_dwmac_suspend(struct device *dev) int ret; ret = stmmac_suspend(dev); + if (ret) + return ret; + + stm32_dwmac_clk_disable(dwmac, true); if (dwmac->ops->suspend) ret = dwmac->ops->suspend(dwmac); @@ -490,7 +464,7 @@ static int stm32_dwmac_resume(struct device *dev) if (dwmac->ops->resume) dwmac->ops->resume(dwmac); - ret = stm32_dwmac_init(priv->plat); + ret = stm32_dwmac_init(priv->plat, true); if (ret) return ret; @@ -505,13 +479,11 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, static struct stm32_ops stm32mcu_dwmac_data = { .set_mode = stm32mcu_set_mode, - .suspend = stm32mcu_suspend, .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK }; static struct stm32_ops stm32mp1_dwmac_data = { .set_mode = stm32mp1_set_mode, - .clk_prepare = stm32mp1_clk_prepare, .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 465ff1fd47..b21d99faa2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; u32 v; @@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & 
EMAC_TX_DMA_STOP_INT) @@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) @@ -1224,7 +1223,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return -EINVAL; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -1244,7 +1243,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); if (ret) - goto dwmac_deconfig; + return ret; ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); if (ret) @@ -1295,8 +1294,6 @@ dwmac_exit: sun8i_dwmac_exit(pdev, gmac); dwmac_syscon: sun8i_dwmac_unset_syscon(gmac); -dwmac_deconfig: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index beceeae579..2653a9f095 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -108,36 +108,31 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!gmac) + return -ENOMEM; ret = of_get_phy_mode(dev->of_node, &gmac->interface); if (ret && ret != -ENODEV) { dev_err(dev, "Can't get phy-mode\n"); - goto err_remove_config_dt; + return ret; } gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); if (IS_ERR(gmac->tx_clk)) { dev_err(dev, "could not get tx clock\n"); - ret = PTR_ERR(gmac->tx_clk); - goto err_remove_config_dt; + return PTR_ERR(gmac->tx_clk); } /* Optional regulator for PHY */ gmac->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(gmac->regulator)) { - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_remove_config_dt; - } + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_info(dev, "no regulator found\n"); gmac->regulator = NULL; } @@ -155,7 +150,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -165,8 +160,6 @@ static int sun7i_gmac_probe(struct platform_device *pdev) err_gmac_exit: sun7i_gmac_exit(pdev, plat_dat->bsp_priv); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index e0f3cbd368..362f85136c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -284,7 +284,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) if (err < 0) goto disable_clks; - plat = stmmac_probe_config_dt(pdev, res.mac); + plat = devm_stmmac_probe_config_dt(pdev, res.mac); if (IS_ERR(plat)) { err = PTR_ERR(plat); goto 
disable_clks; @@ -303,7 +303,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) GFP_KERNEL); if (!plat->mdio_bus_data) { err = -ENOMEM; - goto remove; + goto disable_clks; } } @@ -321,7 +321,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) 500, 500 * 2000); if (err < 0) { dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n"); - goto remove; + goto disable_clks; } plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up; @@ -342,12 +342,10 @@ static int tegra_mgbe_probe(struct platform_device *pdev) err = stmmac_dvr_probe(&pdev->dev, plat, &res); if (err < 0) - goto remove; + goto disable_clks; return 0; -remove: - stmmac_remove_config_dt(pdev, plat); disable_clks: clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index 22d113fb8e..a5a5cfa989 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -220,15 +220,13 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto remove_config; - } + if (!dwmac) + return -ENOMEM; spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; @@ -238,7 +236,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) ret = visconti_eth_clock_probe(pdev, plat_dat); if (ret) - goto remove_config; + return ret; visconti_eth_init_hw(pdev, plat_dat); @@ -252,22 +250,14 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) remove: visconti_eth_clock_remove(pdev); -remove_config: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } static void visconti_eth_dwmac_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - stmmac_pltfr_remove(pdev); - visconti_eth_clock_remove(pdev); - - stmmac_remove_config_dt(pdev, priv->plat); } static const struct of_device_id visconti_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9470d3fd2d..0d185e54eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; if (dir == DMA_DIR_RX) @@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret 
|= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 7907d62d34..85e18f9a22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. */ if (likely(value & DMA_INTR_ENA_RIE)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index a4e8b498de..1739484747 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -319,6 +319,8 @@ #define XGMAC_RXCEIE BIT(4) #define XGMAC_TXCEIE BIT(0) #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 @@ -401,6 +403,7 @@ #define XGMAC_DCEIE BIT(1) #define XGMAC_TCEIE BIT(0) #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) #define XGMAC_SPH BIT(24) #define XGMAC_PBLx8 BIT(16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index a74e71db79..b5509f244e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { { false, "UNKNOWN", "Unknown Error" }, /* 31 */ }; +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", 
DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR }, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + static void dwxgmac3_handle_dma_err(struct net_device *ndev, void __iomem *ioaddr, bool correctable, struct stmmac_safety_stats *stats) @@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev, dwxgmac3_log_error(ndev, value, correctable, "DMA", dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); } static int @@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + /* 5. Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + return 0; } @@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, ret |= !corr; } - err = dma & (XGMAC_DEUIS | XGMAC_DECIS); + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); corr = dma & XGMAC_DECIS; if (err) { dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); @@ -930,6 +985,7 @@ static const struct dwxgmac3_error { { dwxgmac3_mac_errors }, { dwxgmac3_mtl_errors }, { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, }; static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 3cde695fec..dd2ab6185c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { if (likely(intr_status & XGMAC_RI)) { 
- u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 69c8c25285..f9c1f3f4a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -539,44 +539,79 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + +static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int start; - int q, stat; - char *p; + int q; for (q = 0; q < tx_cnt; q++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; - struct stmmac_txq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); - for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_tx_normal_irq_n(priv, q); } for (q = 0; q < rx_cnt; q++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; - struct stmmac_rxq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); - for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_rx_normal_irq_n(priv, q); } } @@ -635,39 +670,49 @@ static void 
stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < rx_queues_count; i++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; - struct stmmac_rxq_stats snapshot; + struct stmmac_napi_rx_stats snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - - data[j++] += snapshot.rx_pkt_n; - data[j++] += snapshot.rx_normal_irq_n; - normal_irq_n += snapshot.rx_normal_irq_n; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + snapshot = rxq_stats->napi; + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&snapshot.rx_pkt_n); + n_irq = stmmac_get_rx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + napi_poll += u64_stats_read(&snapshot.poll); } pos = j; for (i = 0; i < tx_queues_count; i++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; - struct stmmac_txq_stats snapshot; + struct stmmac_napi_tx_stats napi_snapshot; + struct stmmac_q_tx_stats q_snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - - data[j++] += snapshot.tx_pkt_n; - data[j++] += snapshot.tx_normal_irq_n; - normal_irq_n += snapshot.tx_normal_irq_n; - data[j++] += snapshot.tx_clean; - data[j++] += snapshot.tx_set_ic_bit; - data[j++] += snapshot.tx_tso_frames; - data[j++] += snapshot.tx_tso_nfrags; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + q_snapshot = txq_stats->q; + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + napi_snapshot = txq_stats->napi; + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n); + n_irq = stmmac_get_tx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + data[j++] += u64_stats_read(&napi_snapshot.tx_clean); + data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) + + u64_stats_read(&napi_snapshot.tx_set_ic_bit); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags); + napi_poll += u64_stats_read(&napi_snapshot.poll); } normal_irq_n += priv->xstats.rx_early_irq; data[j++] = normal_irq_n; @@ -984,7 +1029,7 @@ static int __stmmac_set_coalesce(struct net_device *dev, else if (queue >= max_cnt) return -EINVAL; - if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { + if (priv->use_riwt) { rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 292857c0e6..e9a1b60ebb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2442,7 +2442,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) struct xdp_desc xdp_desc; bool work_done = true; u32 tx_set_ic_bit = 0; - unsigned long flags; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); @@ -2515,9 +2514,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } - flags = 
u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit += tx_set_ic_bit; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); + u64_stats_update_end(&txq_stats->napi_syncp); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2552,16 +2551,19 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) * @priv: driver private structure * @budget: napi budget limiting this functions packet handling * @queue: TX queue index + * @pending_packets: signal to arm the TX coal timer * Description: it reclaims the transmit resources after transmission completes. + * If some packets still needs to be handled, due to TX coalesce, set + * pending_packets to true to make NAPI arm the TX coal timer. */ -static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, + bool *pending_packets) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; - unsigned long flags; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2715,13 +2717,13 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - stmmac_tx_timer_arm(priv, queue); + *pending_packets = true; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_packets += tx_packets; - txq_stats->tx_pkt_n += tx_packets; - txq_stats->tx_clean++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); + u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); + u64_stats_inc(&txq_stats->napi.tx_clean); + u64_stats_update_end(&txq_stats->napi_syncp); priv->xstats.tx_errors += tx_errors; @@ -3005,13 +3007,25 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; u32 tx_coal_timer = priv->tx_coal_timer[queue]; + struct stmmac_channel *ch; + struct napi_struct *napi; if (!tx_coal_timer) return; - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(tx_coal_timer), - HRTIMER_MODE_REL); + ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; + + /* Arm timer only if napi is not already scheduled. + * Try to cancel any timer if napi is scheduled, timer will be armed + * again in the next scheduled napi. 
+ */ + if (unlikely(!napi_is_scheduled(napi))) + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(tx_coal_timer), + HRTIMER_MODE_REL); + else + hrtimer_try_to_cancel(&tx_q->txtimer); } /** @@ -3853,6 +3867,9 @@ static int __stmmac_open(struct net_device *dev, priv->rx_copybreak = STMMAC_RX_COPYBREAK; buf_sz = dma_conf->dma_buf_sz; + for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) + dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); stmmac_reset_queues_param(priv); @@ -4131,7 +4148,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; - unsigned long flags; u32 pay_len, mss; dma_addr_t des; int i; @@ -4296,13 +4312,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; - txq_stats->tx_tso_frames++; - txq_stats->tx_tso_nfrags += nfrags; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); + u64_stats_inc(&txq_stats->q.tx_tso_frames); + u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4401,7 +4417,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; - unsigned long flags; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; @@ -4571,11 +4586,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4839,12 +4854,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, set_ic = false; if (set_ic) { - unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -4994,7 +5008,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; - unsigned long flags; struct sk_buff *skb; u32 hash; @@ -5019,10 +5032,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n++; - 
rxq_stats->rx_bytes += len; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.rx_pkt_n); + u64_stats_add(&rxq_stats->napi.rx_bytes, len); + u64_stats_update_end(&rxq_stats->napi_syncp); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5104,7 +5117,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct bpf_prog *prog; bool failure = false; - unsigned long flags; int xdp_status = 0; int status = 0; @@ -5259,9 +5271,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5299,7 +5311,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; - unsigned long flags; int xdp_status = 0; int buf_sz; @@ -5552,11 +5563,11 @@ drain_data: stmmac_rx_refill(priv, queue); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_packets += rx_packets; - rxq_stats->rx_bytes += rx_bytes; - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); + u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5571,13 +5582,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_priv *priv = ch->priv_data; struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; - unsigned long flags; int work_done; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5597,16 +5607,16 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) container_of(napi, struct stmmac_channel, tx_napi); struct stmmac_priv *priv = ch->priv_data; struct stmmac_txq_stats *txq_stats; + bool pending_packets = false; u32 chan = ch->index; - unsigned long flags; int work_done; txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); - work_done = stmmac_tx_clean(priv, budget, chan); + work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5617,6 +5627,10 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 
spin_unlock_irqrestore(&ch->lock, flags); } + /* TX still have packet to handle, check if we need to arm tx timer */ + if (pending_packets) + stmmac_tx_timer_arm(priv, chan); + return work_done; } @@ -5625,23 +5639,23 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rxtx_napi); struct stmmac_priv *priv = ch->priv_data; + bool tx_pending_packets = false; int rx_done, tx_done, rxtx_done; struct stmmac_rxq_stats *rxq_stats; struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; - unsigned long flags; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); - tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); rx_done = stmmac_rx_zc(priv, budget, chan); @@ -5666,6 +5680,10 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&ch->lock, flags); } + /* TX still have packet to handle, check if we need to arm tx timer */ + if (tx_pending_packets) + stmmac_tx_timer_arm(priv, chan); + return min(rxtx_done, budget - 1); } @@ -5959,11 +5977,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -5979,11 +5992,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6005,11 +6013,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6036,11 +6039,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6961,10 +6959,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 tx_bytes; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); 
- tx_packets = txq_stats->tx_packets; - tx_bytes = txq_stats->tx_bytes; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; @@ -6976,10 +6977,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 rx_bytes; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - rx_packets = rxq_stats->rx_packets; - rx_bytes = rxq_stats->rx_bytes; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); + rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -7373,9 +7374,16 @@ int stmmac_dvr_probe(struct device *device, priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) - u64_stats_init(&priv->xstats.rxq_stats[i].syncp); - for (i = 0; i < MTL_MAX_TX_QUEUES; i++) - u64_stats_init(&priv->xstats.txq_stats[i].syncp); + u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); + u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); + } + + priv->xstats.pcpu_stats = + devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); + if (!priv->xstats.pcpu_stats) + return -ENOMEM; stmmac_set_ethtool_ops(ndev); priv->pause = pause; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 30d5e63519..1ffde555da 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -387,6 +387,22 @@ static int stmmac_of_get_mac_mode(struct device_node *np) return -ENODEV; } +/** + * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() + * @pdev: platform_device structure + * @plat: driver data platform structure + * + * Release resources claimed by stmmac_probe_config_dt(). + */ +static void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ + clk_disable_unprepare(plat->stmmac_clk); + clk_disable_unprepare(plat->pclk); + of_node_put(plat->phy_node); + of_node_put(plat->mdio_node); +} + /** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure @@ -395,7 +411,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np) * this function is to read the driver parameters from device-tree and * set some private fields that will be used by the main at runtime. */ -struct plat_stmmacenet_data * +static struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { struct device_node *np = pdev->dev.of_node; @@ -665,43 +681,14 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) return plat; } - -/** - * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() - * @pdev: platform_device structure - * @plat: driver data platform structure - * - * Release resources claimed by stmmac_probe_config_dt(). 
- */ -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) -{ - clk_disable_unprepare(plat->stmmac_clk); - clk_disable_unprepare(plat->pclk); - of_node_put(plat->phy_node); - of_node_put(plat->mdio_node); -} #else -struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) -{ - return ERR_PTR(-EINVAL); -} - struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { return ERR_PTR(-EINVAL); } - -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) -{ -} #endif /* CONFIG_OF */ -EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt); -EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) @@ -810,7 +797,7 @@ static void devm_stmmac_pltfr_remove(void *data) { struct platform_device *pdev = data; - stmmac_pltfr_remove_no_dt(pdev); + stmmac_pltfr_remove(pdev); } /** @@ -837,12 +824,12 @@ int devm_stmmac_pltfr_probe(struct platform_device *pdev, EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe); /** - * stmmac_pltfr_remove_no_dt + * stmmac_pltfr_remove * @pdev: pointer to the platform device * Description: This undoes the effects of stmmac_pltfr_probe() by removing the * driver and calling the platform's exit() callback. */ -void stmmac_pltfr_remove_no_dt(struct platform_device *pdev) +void stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -851,23 +838,6 @@ void stmmac_pltfr_remove_no_dt(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); stmmac_pltfr_exit(pdev, plat); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_remove_no_dt); - -/** - * stmmac_pltfr_remove - * @pdev: platform device pointer - * Description: this function calls the main to free the net resources - * and calls the platforms hook and release the resources (e.g. mem). 
- */ -void stmmac_pltfr_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct plat_stmmacenet_data *plat = priv->plat; - - stmmac_pltfr_remove_no_dt(pdev); - stmmac_remove_config_dt(pdev, plat); -} EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index c5565b2a70..bb6fc7e59a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -11,12 +11,8 @@ #include "stmmac.h" -struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); @@ -32,7 +28,6 @@ int stmmac_pltfr_probe(struct platform_device *pdev, int devm_stmmac_pltfr_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res); -void stmmac_pltfr_remove_no_dt(struct platform_device *pdev); void stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 3d7825cb30..bffa5c0170 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -81,7 +81,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); write_unlock_irqrestore(&priv->ptp_lock, flags); - /* Caculate new basetime and re-configured EST after PTP time adjust. */ + /* Calculate new basetime and re-configured EST after PTP time adjust. */ if (est_rst) { struct timespec64 current_time, time; ktime_t current_time_ns, basetime; @@ -191,26 +191,33 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); write_unlock_irqrestore(&priv->ptp_lock, flags); break; - case PTP_CLK_REQ_EXTTS: - if (on) - priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN; - else - priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN; + case PTP_CLK_REQ_EXTTS: { + u8 channel; + mutex_lock(&priv->aux_ts_lock); acr_value = readl(ptpaddr + PTP_ACR); + channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value)); acr_value &= ~PTP_ACR_MASK; + if (on) { + if (FIELD_GET(PTP_ACR_MASK, acr_value)) { + netdev_err(priv->dev, + "Cannot enable auxiliary snapshot %d as auxiliary snapshot %d is already enabled", + rq->extts.index, channel); + mutex_unlock(&priv->aux_ts_lock); + return -EBUSY; + } + + priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN; + /* Enable External snapshot trigger */ - acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSEN(rq->extts.index); acr_value |= PTP_ACR_ATSFC; - netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", - priv->plat->ext_snapshot_num >> - PTP_ACR_ATSEN_SHIFT); } else { - netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", - priv->plat->ext_snapshot_num >> - PTP_ACR_ATSEN_SHIFT); + priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN; } + netdev_dbg(priv->dev, "Auxiliary Snapshot %d %s.\n", + rq->extts.index, on ? 
"enabled" : "disabled"); writel(acr_value, ptpaddr + PTP_ACR); mutex_unlock(&priv->aux_ts_lock); /* wait for auxts fifo clear to finish */ @@ -218,6 +225,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp, !(acr_value & PTP_ACR_ATSFC), 10, 10000); break; + } default: break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index d1fe4b46f1..fce3fba2ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -79,7 +79,7 @@ #define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ #define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ #define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ -#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_ATSEN(index) (PTP_ACR_ATSEN0 << (index)) #define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ #define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ #define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index f9e43fc32e..3ca1c2a816 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -802,7 +802,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) stmmac_start_rx(priv, priv->ioaddr, i); local_bh_disable(); - napi_reschedule(&ch->rx_napi); + napi_schedule(&ch->rx_napi); local_bh_enable(); } diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 011d74087f..21431f43e4 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -10132,7 +10132,7 @@ err_out: return err; } -static int niu_of_remove(struct platform_device *op) +static void niu_of_remove(struct platform_device *op) { struct net_device *dev = platform_get_drvdata(op); @@ -10165,7 +10165,6 @@ static int niu_of_remove(struct platform_device *op) free_netdev(dev); } - return 0; } static const struct of_device_id niu_match[] = { @@ -10183,7 +10182,7 @@ static struct platform_driver niu_of_driver = { .of_match_table = niu_match, }, .probe = niu_of_probe, - .remove = niu_of_remove, + .remove_new = niu_of_remove, }; #endif /* CONFIG_SPARC64 */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index cc34d92d2e..16c86b13c1 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1234,7 +1234,7 @@ static int bigmac_sbus_probe(struct platform_device *op) return bigmac_ether_init(op, qec_op); } -static int bigmac_sbus_remove(struct platform_device *op) +static void bigmac_sbus_remove(struct platform_device *op) { struct bigmac *bp = platform_get_drvdata(op); struct device *parent = op->dev.parent; @@ -1255,8 +1255,6 @@ static int bigmac_sbus_remove(struct platform_device *op) bp->bblock_dvma); free_netdev(net_dev); - - return 0; } static const struct of_device_id bigmac_sbus_match[] = { @@ -1274,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = { .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, - .remove = bigmac_sbus_remove, + .remove_new = bigmac_sbus_remove, }; module_platform_driver(bigmac_sbus_driver); diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index b37360f449..aedd13c942 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -933,7 +933,7 @@ static int 
qec_sbus_probe(struct platform_device *op) return qec_ether_init(op); } -static int qec_sbus_remove(struct platform_device *op) +static void qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = platform_get_drvdata(op); struct net_device *net_dev = qp->dev; @@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op) qp->buffers, qp->buffers_dvma); free_netdev(net_dev); - - return 0; } static const struct of_device_id qec_sbus_match[] = { @@ -967,7 +965,7 @@ static struct platform_driver qec_sbus_driver = { .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, - .remove = qec_sbus_remove, + .remove_new = qec_sbus_remove, }; static int __init qec_init(void) diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index c499a14314..391a1bc7f4 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -511,7 +511,7 @@ out_clk_disable: return ret; } -static int spl2sw_remove(struct platform_device *pdev) +static void spl2sw_remove(struct platform_device *pdev) { struct spl2sw_common *comm; int i; @@ -538,8 +538,6 @@ static int spl2sw_remove(struct platform_device *pdev) spl2sw_mdio_remove(comm); clk_disable_unprepare(comm->clk); - - return 0; } static const struct of_device_id spl2sw_of_match[] = { @@ -551,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match); static struct platform_driver spl2sw_driver = { .probe = spl2sw_probe, - .remove = spl2sw_remove, + .remove_new = spl2sw_remove, .driver = { .name = "sp7021_emac", .of_match_table = spl2sw_of_match, diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index cac61f5d3f..e60b557d59 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 + depends on PCI || EISA || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 help If you have a network (Ethernet) card belonging to this class, say Y. @@ -180,13 +180,6 @@ config TLAN Please email feedback to . 
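/*
 * Illustrative sketch (not part of the patch): the niu, sunbmac, sunqe and
 * spl2sw hunks above all follow the same conversion, replacing the int-returning
 * .remove callback (whose return value the driver core ignored) with a
 * void-returning callback registered through .remove_new. The skeleton below
 * uses hypothetical "foo" names; a network driver would typically call
 * unregister_netdev() and free_netdev() in the remove callback, as the hunks
 * above do.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	int dummy;	/* placeholder for real driver state */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

/* Old style was "static int foo_remove(...) { ...; return 0; }"; the return
 * value could not be acted on, so the callback is now void. */
static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* undo whatever probe set up that is not devm-managed */
	(void)priv;
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver		= {
		.name	= "foo-example",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");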
-config CPMAC - tristate "TI AR7 CPMAC Ethernet support" - depends on AR7 - select PHYLIB - help - TI AR7 CPMAC Ethernet support - config TI_ICSSG_PRUETH tristate "TI Gigabit PRU Ethernet driver" select PHYLIB diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 67bed861f3..27de1d6971 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o obj-$(CONFIG_TLAN) += tlan.o -obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index c62b0f99f2..f7a6d720eb 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1588,10 +1588,10 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy /* rx_pause/tx_pause */ if (rx_pause) - mac_control |= CPSW_SL_CTL_RX_FLOW_EN; + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; if (tx_pause) - mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c deleted file mode 100644 index 80eeeb463c..0000000000 --- a/drivers/net/ethernet/ti/cpmac.c +++ /dev/null @@ -1,1251 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2006, 2007 Eugene Konev - * - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -MODULE_AUTHOR("Eugene Konev "); -MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:cpmac"); - -static int debug_level = 8; -static int dumb_switch; - -/* Next 2 are only used in cpmac_probe, so it's pointless to change them */ -module_param(debug_level, int, 0444); -module_param(dumb_switch, int, 0444); - -MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); -MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); - -#define CPMAC_VERSION "0.5.2" -/* frame size + 802.1q tag + FCS size */ -#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) -#define CPMAC_QUEUES 8 - -/* Ethernet registers */ -#define CPMAC_TX_CONTROL 0x0004 -#define CPMAC_TX_TEARDOWN 0x0008 -#define CPMAC_RX_CONTROL 0x0014 -#define CPMAC_RX_TEARDOWN 0x0018 -#define CPMAC_MBP 0x0100 -#define MBP_RXPASSCRC 0x40000000 -#define MBP_RXQOS 0x20000000 -#define MBP_RXNOCHAIN 0x10000000 -#define MBP_RXCMF 0x01000000 -#define MBP_RXSHORT 0x00800000 -#define MBP_RXCEF 0x00400000 -#define MBP_RXPROMISC 0x00200000 -#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) -#define MBP_RXBCAST 0x00002000 -#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) -#define MBP_RXMCAST 0x00000020 -#define MBP_MCASTCHAN(channel) ((channel) & 0x7) -#define CPMAC_UNICAST_ENABLE 0x0104 -#define CPMAC_UNICAST_CLEAR 0x0108 -#define CPMAC_MAX_LENGTH 0x010c -#define CPMAC_BUFFER_OFFSET 0x0110 -#define CPMAC_MAC_CONTROL 0x0160 -#define MAC_TXPTYPE 0x00000200 -#define MAC_TXPACE 0x00000040 -#define MAC_MII 0x00000020 -#define MAC_TXFLOW 0x00000010 -#define MAC_RXFLOW 0x00000008 -#define MAC_MTEST 0x00000004 -#define 
MAC_LOOPBACK 0x00000002 -#define MAC_FDX 0x00000001 -#define CPMAC_MAC_STATUS 0x0164 -#define MAC_STATUS_QOS 0x00000004 -#define MAC_STATUS_RXFLOW 0x00000002 -#define MAC_STATUS_TXFLOW 0x00000001 -#define CPMAC_TX_INT_ENABLE 0x0178 -#define CPMAC_TX_INT_CLEAR 0x017c -#define CPMAC_MAC_INT_VECTOR 0x0180 -#define MAC_INT_STATUS 0x00080000 -#define MAC_INT_HOST 0x00040000 -#define MAC_INT_RX 0x00020000 -#define MAC_INT_TX 0x00010000 -#define CPMAC_MAC_EOI_VECTOR 0x0184 -#define CPMAC_RX_INT_ENABLE 0x0198 -#define CPMAC_RX_INT_CLEAR 0x019c -#define CPMAC_MAC_INT_ENABLE 0x01a8 -#define CPMAC_MAC_INT_CLEAR 0x01ac -#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) -#define CPMAC_MAC_ADDR_MID 0x01d0 -#define CPMAC_MAC_ADDR_HI 0x01d4 -#define CPMAC_MAC_HASH_LO 0x01d8 -#define CPMAC_MAC_HASH_HI 0x01dc -#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) -#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) -#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) -#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) -#define CPMAC_REG_END 0x0680 - -/* Rx/Tx statistics - * TODO: use some of them to fill stats in cpmac_stats() - */ -#define CPMAC_STATS_RX_GOOD 0x0200 -#define CPMAC_STATS_RX_BCAST 0x0204 -#define CPMAC_STATS_RX_MCAST 0x0208 -#define CPMAC_STATS_RX_PAUSE 0x020c -#define CPMAC_STATS_RX_CRC 0x0210 -#define CPMAC_STATS_RX_ALIGN 0x0214 -#define CPMAC_STATS_RX_OVER 0x0218 -#define CPMAC_STATS_RX_JABBER 0x021c -#define CPMAC_STATS_RX_UNDER 0x0220 -#define CPMAC_STATS_RX_FRAG 0x0224 -#define CPMAC_STATS_RX_FILTER 0x0228 -#define CPMAC_STATS_RX_QOSFILTER 0x022c -#define CPMAC_STATS_RX_OCTETS 0x0230 - -#define CPMAC_STATS_TX_GOOD 0x0234 -#define CPMAC_STATS_TX_BCAST 0x0238 -#define CPMAC_STATS_TX_MCAST 0x023c -#define CPMAC_STATS_TX_PAUSE 0x0240 -#define CPMAC_STATS_TX_DEFER 0x0244 -#define CPMAC_STATS_TX_COLLISION 0x0248 -#define CPMAC_STATS_TX_SINGLECOLL 0x024c -#define CPMAC_STATS_TX_MULTICOLL 0x0250 -#define CPMAC_STATS_TX_EXCESSCOLL 0x0254 -#define CPMAC_STATS_TX_LATECOLL 0x0258 -#define CPMAC_STATS_TX_UNDERRUN 0x025c -#define CPMAC_STATS_TX_CARRIERSENSE 0x0260 -#define CPMAC_STATS_TX_OCTETS 0x0264 - -#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) -#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ - (reg))) - -/* MDIO bus */ -#define CPMAC_MDIO_VERSION 0x0000 -#define CPMAC_MDIO_CONTROL 0x0004 -#define MDIOC_IDLE 0x80000000 -#define MDIOC_ENABLE 0x40000000 -#define MDIOC_PREAMBLE 0x00100000 -#define MDIOC_FAULT 0x00080000 -#define MDIOC_FAULTDETECT 0x00040000 -#define MDIOC_INTTEST 0x00020000 -#define MDIOC_CLKDIV(div) ((div) & 0xff) -#define CPMAC_MDIO_ALIVE 0x0008 -#define CPMAC_MDIO_LINK 0x000c -#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) -#define MDIO_BUSY 0x80000000 -#define MDIO_WRITE 0x40000000 -#define MDIO_REG(reg) (((reg) & 0x1f) << 21) -#define MDIO_PHY(phy) (((phy) & 0x1f) << 16) -#define MDIO_DATA(data) ((data) & 0xffff) -#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) -#define PHYSEL_LINKSEL 0x00000040 -#define PHYSEL_LINKINT 0x00000020 - -struct cpmac_desc { - u32 hw_next; - u32 hw_data; - u16 buflen; - u16 bufflags; - u16 datalen; - u16 dataflags; -#define CPMAC_SOP 0x8000 -#define CPMAC_EOP 0x4000 -#define CPMAC_OWN 0x2000 -#define CPMAC_EOQ 0x1000 - struct sk_buff *skb; - struct cpmac_desc *next; - struct cpmac_desc *prev; - dma_addr_t mapping; - dma_addr_t data_mapping; -}; - -struct cpmac_priv { - spinlock_t lock; - spinlock_t rx_lock; - struct cpmac_desc *rx_head; - int 
ring_size; - struct cpmac_desc *desc_ring; - dma_addr_t dma_ring; - void __iomem *regs; - struct mii_bus *mii_bus; - char phy_name[MII_BUS_ID_SIZE + 3]; - int oldlink, oldspeed, oldduplex; - u32 msg_enable; - struct net_device *dev; - struct work_struct reset_work; - struct platform_device *pdev; - struct napi_struct napi; - atomic_t reset_pending; -}; - -static irqreturn_t cpmac_irq(int, void *); -static void cpmac_hw_start(struct net_device *dev); -static void cpmac_hw_stop(struct net_device *dev); -static int cpmac_stop(struct net_device *dev); -static int cpmac_open(struct net_device *dev); - -static void cpmac_dump_regs(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - - for (i = 0; i < CPMAC_REG_END; i += 4) { - if (i % 16 == 0) { - if (i) - printk("\n"); - printk("%s: reg[%p]:", dev->name, priv->regs + i); - } - printk(" %08x", cpmac_read(priv->regs, i)); - } - printk("\n"); -} - -static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) -{ - int i; - - printk("%s: desc[%p]:", dev->name, desc); - for (i = 0; i < sizeof(*desc) / 4; i++) - printk(" %08x", ((u32 *)desc)[i]); - printk("\n"); -} - -static void cpmac_dump_all_desc(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - struct cpmac_desc *dump = priv->rx_head; - - do { - cpmac_dump_desc(dev, dump); - dump = dump->next; - } while (dump != priv->rx_head); -} - -static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) -{ - int i; - - printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); - for (i = 0; i < skb->len; i++) { - if (i % 16 == 0) { - if (i) - printk("\n"); - printk("%s: data[%p]:", dev->name, skb->data + i); - } - printk(" %02x", ((u8 *)skb->data)[i]); - } - printk("\n"); -} - -static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) -{ - u32 val; - - while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) - cpu_relax(); - cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | - MDIO_PHY(phy_id)); - while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) - cpu_relax(); - - return MDIO_DATA(val); -} - -static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, - int reg, u16 val) -{ - while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) - cpu_relax(); - cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | - MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); - - return 0; -} - -static int cpmac_mdio_reset(struct mii_bus *bus) -{ - struct clk *cpmac_clk; - - cpmac_clk = clk_get(&bus->dev, "cpmac"); - if (IS_ERR(cpmac_clk)) { - pr_err("unable to get cpmac clock\n"); - return -1; - } - ar7_device_reset(AR7_RESET_BIT_MDIO); - cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | - MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); - - return 0; -} - -static struct mii_bus *cpmac_mii; - -static void cpmac_set_multicast_list(struct net_device *dev) -{ - struct netdev_hw_addr *ha; - u8 tmp; - u32 mbp, bit, hash[2] = { 0, }; - struct cpmac_priv *priv = netdev_priv(dev); - - mbp = cpmac_read(priv->regs, CPMAC_MBP); - if (dev->flags & IFF_PROMISC) { - cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | - MBP_RXPROMISC); - } else { - cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); - if (dev->flags & IFF_ALLMULTI) { - /* enable all multicast mode */ - cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); - cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); - } else { - /* cpmac uses some strange mac address hashing - * 
(not crc32) - */ - netdev_for_each_mc_addr(ha, dev) { - bit = 0; - tmp = ha->addr[0]; - bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = ha->addr[1]; - bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = ha->addr[2]; - bit ^= (tmp >> 6) ^ tmp; - tmp = ha->addr[3]; - bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = ha->addr[4]; - bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = ha->addr[5]; - bit ^= (tmp >> 6) ^ tmp; - bit &= 0x3f; - hash[bit / 32] |= 1 << (bit % 32); - } - - cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); - cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); - } - } -} - -static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, - struct cpmac_desc *desc) -{ - struct sk_buff *skb, *result = NULL; - - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(priv->dev, desc); - cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); - if (unlikely(!desc->datalen)) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx: spurious interrupt\n"); - - return NULL; - } - - skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); - if (likely(skb)) { - skb_put(desc->skb, desc->datalen); - desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); - skb_checksum_none_assert(desc->skb); - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += desc->datalen; - result = desc->skb; - dma_unmap_single(&priv->dev->dev, desc->data_mapping, - CPMAC_SKB_SIZE, DMA_FROM_DEVICE); - desc->skb = skb; - desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - if (unlikely(netif_msg_pktdata(priv))) { - netdev_dbg(priv->dev, "received packet:\n"); - cpmac_dump_skb(priv->dev, result); - } - } else { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, - "low on skbs, dropping packet\n"); - - priv->dev->stats.rx_dropped++; - } - - desc->buflen = CPMAC_SKB_SIZE; - desc->dataflags = CPMAC_OWN; - - return result; -} - -static int cpmac_poll(struct napi_struct *napi, int budget) -{ - struct sk_buff *skb; - struct cpmac_desc *desc, *restart; - struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); - int received = 0, processed = 0; - - spin_lock(&priv->rx_lock); - if (unlikely(!priv->rx_head)) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx: polling, but no queue\n"); - - spin_unlock(&priv->rx_lock); - napi_complete(napi); - return 0; - } - - desc = priv->rx_head; - restart = NULL; - while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { - processed++; - - if ((desc->dataflags & CPMAC_EOQ) != 0) { - /* The last update to eoq->hw_next didn't happen - * soon enough, and the receiver stopped here. - * Remember this descriptor so we can restart - * the receiver after freeing some space. - */ - if (unlikely(restart)) { - if (netif_msg_rx_err(priv)) - netdev_err(priv->dev, "poll found a" - " duplicate EOQ: %p and %p\n", - restart, desc); - goto fatal_error; - } - - restart = desc->next; - } - - skb = cpmac_rx_one(priv, desc); - if (likely(skb)) { - netif_receive_skb(skb); - received++; - } - desc = desc->next; - } - - if (desc != priv->rx_head) { - /* We freed some buffers, but not the whole ring, - * add what we did free to the rx list - */ - desc->prev->hw_next = (u32)0; - priv->rx_head->prev->hw_next = priv->rx_head->mapping; - } - - /* Optimization: If we did not actually process an EOQ (perhaps because - * of quota limits), check to see if the tail of the queue has EOQ set. 
- * We should immediately restart in that case so that the receiver can - * restart and run in parallel with more packet processing. - * This lets us handle slightly larger bursts before running - * out of ring space (assuming dev->weight < ring_size) - */ - - if (!restart && - (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) - == CPMAC_EOQ && - (priv->rx_head->dataflags & CPMAC_OWN) != 0) { - /* reset EOQ so the poll loop (above) doesn't try to - * restart this when it eventually gets to this descriptor. - */ - priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; - restart = priv->rx_head; - } - - if (restart) { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_fifo_errors++; - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx dma ring overrun\n"); - - if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { - if (netif_msg_drv(priv)) - netdev_err(priv->dev, "cpmac_poll is trying " - "to restart rx from a descriptor " - "that's not free: %p\n", restart); - goto fatal_error; - } - - cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); - } - - priv->rx_head = desc; - spin_unlock(&priv->rx_lock); - if (unlikely(netif_msg_rx_status(priv))) - netdev_dbg(priv->dev, "poll processed %d packets\n", received); - - if (processed == 0) { - /* we ran out of packets to read, - * revert to interrupt-driven mode - */ - napi_complete(napi); - cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); - return 0; - } - - return 1; - -fatal_error: - /* Something went horribly wrong. - * Reset hardware to try to recover rather than wedging. - */ - if (netif_msg_drv(priv)) { - netdev_err(priv->dev, "cpmac_poll is confused. " - "Resetting hardware\n"); - cpmac_dump_all_desc(priv->dev); - netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", - cpmac_read(priv->regs, CPMAC_RX_PTR(0)), - cpmac_read(priv->regs, CPMAC_RX_ACK(0))); - } - - spin_unlock(&priv->rx_lock); - napi_complete(napi); - netif_tx_stop_all_queues(priv->dev); - napi_disable(&priv->napi); - - atomic_inc(&priv->reset_pending); - cpmac_hw_stop(priv->dev); - if (!schedule_work(&priv->reset_work)) - atomic_dec(&priv->reset_pending); - - return 0; - -} - -static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - int queue; - unsigned int len; - struct cpmac_desc *desc; - struct cpmac_priv *priv = netdev_priv(dev); - - if (unlikely(atomic_read(&priv->reset_pending))) - return NETDEV_TX_BUSY; - - if (unlikely(skb_padto(skb, ETH_ZLEN))) - return NETDEV_TX_OK; - - len = max_t(unsigned int, skb->len, ETH_ZLEN); - queue = skb_get_queue_mapping(skb); - netif_stop_subqueue(dev, queue); - - desc = &priv->desc_ring[queue]; - if (unlikely(desc->dataflags & CPMAC_OWN)) { - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "tx dma ring full\n"); - - return NETDEV_TX_BUSY; - } - - spin_lock(&priv->lock); - spin_unlock(&priv->lock); - desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; - desc->skb = skb; - desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, - DMA_TO_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - desc->datalen = len; - desc->buflen = len; - if (unlikely(netif_msg_tx_queued(priv))) - netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(dev, desc); - if (unlikely(netif_msg_pktdata(priv))) - cpmac_dump_skb(dev, skb); - cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); - - return NETDEV_TX_OK; -} - -static void cpmac_end_xmit(struct net_device *dev, int queue) -{ - struct cpmac_desc 
*desc; - struct cpmac_priv *priv = netdev_priv(dev); - - desc = &priv->desc_ring[queue]; - cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); - if (likely(desc->skb)) { - spin_lock(&priv->lock); - dev->stats.tx_packets++; - dev->stats.tx_bytes += desc->skb->len; - spin_unlock(&priv->lock); - dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, - DMA_TO_DEVICE); - - if (unlikely(netif_msg_tx_done(priv))) - netdev_dbg(dev, "sent 0x%p, len=%d\n", - desc->skb, desc->skb->len); - - dev_consume_skb_irq(desc->skb); - desc->skb = NULL; - if (__netif_subqueue_stopped(dev, queue)) - netif_wake_subqueue(dev, queue); - } else { - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "end_xmit: spurious interrupt\n"); - if (__netif_subqueue_stopped(dev, queue)) - netif_wake_subqueue(dev, queue); - } -} - -static void cpmac_hw_stop(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); - - ar7_device_reset(pdata->reset_bit); - cpmac_write(priv->regs, CPMAC_RX_CONTROL, - cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); - cpmac_write(priv->regs, CPMAC_TX_CONTROL, - cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); - for (i = 0; i < 8; i++) { - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); - } - cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_CONTROL, - cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); -} - -static void cpmac_hw_start(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); - - ar7_device_reset(pdata->reset_bit); - for (i = 0; i < 8; i++) { - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); - } - cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); - - cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | - MBP_RXMCAST); - cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); - for (i = 0; i < 8; i++) - cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); - cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); - cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | - (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | - (dev->dev_addr[3] << 24)); - cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); - cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); - cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); - cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); - - cpmac_write(priv->regs, CPMAC_RX_CONTROL, - cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); - cpmac_write(priv->regs, CPMAC_TX_CONTROL, - cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); - cpmac_write(priv->regs, CPMAC_MAC_CONTROL, - cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | - MAC_FDX); -} - -static void cpmac_clear_rx(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - struct cpmac_desc *desc; - int i; - - if (unlikely(!priv->rx_head)) - return; - desc = priv->rx_head; - for (i = 
0; i < priv->ring_size; i++) { - if ((desc->dataflags & CPMAC_OWN) == 0) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(dev, "packet dropped\n"); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(dev, desc); - desc->dataflags = CPMAC_OWN; - dev->stats.rx_dropped++; - } - desc->hw_next = desc->next->mapping; - desc = desc->next; - } - priv->rx_head->prev->hw_next = 0; -} - -static void cpmac_clear_tx(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - int i; - - if (unlikely(!priv->desc_ring)) - return; - for (i = 0; i < CPMAC_QUEUES; i++) { - priv->desc_ring[i].dataflags = 0; - if (priv->desc_ring[i].skb) { - dev_kfree_skb_any(priv->desc_ring[i].skb); - priv->desc_ring[i].skb = NULL; - } - } -} - -static void cpmac_hw_error(struct work_struct *work) -{ - struct cpmac_priv *priv = - container_of(work, struct cpmac_priv, reset_work); - - spin_lock(&priv->rx_lock); - cpmac_clear_rx(priv->dev); - spin_unlock(&priv->rx_lock); - cpmac_clear_tx(priv->dev); - cpmac_hw_start(priv->dev); - barrier(); - atomic_dec(&priv->reset_pending); - - netif_tx_wake_all_queues(priv->dev); - cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); -} - -static void cpmac_check_status(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); - int rx_channel = (macstatus >> 8) & 7; - int rx_code = (macstatus >> 12) & 15; - int tx_channel = (macstatus >> 16) & 7; - int tx_code = (macstatus >> 20) & 15; - - if (rx_code || tx_code) { - if (netif_msg_drv(priv) && net_ratelimit()) { - /* Can't find any documentation on what these - * error codes actually are. So just log them and hope.. - */ - if (rx_code) - netdev_warn(dev, "host error %d on rx " - "channel %d (macstatus %08x), resetting\n", - rx_code, rx_channel, macstatus); - if (tx_code) - netdev_warn(dev, "host error %d on tx " - "channel %d (macstatus %08x), resetting\n", - tx_code, tx_channel, macstatus); - } - - netif_tx_stop_all_queues(dev); - cpmac_hw_stop(dev); - if (schedule_work(&priv->reset_work)) - atomic_inc(&priv->reset_pending); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_regs(dev); - } - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); -} - -static irqreturn_t cpmac_irq(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct cpmac_priv *priv; - int queue; - u32 status; - - priv = netdev_priv(dev); - - status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); - - if (unlikely(netif_msg_intr(priv))) - netdev_dbg(dev, "interrupt status: 0x%08x\n", status); - - if (status & MAC_INT_TX) - cpmac_end_xmit(dev, (status & 7)); - - if (status & MAC_INT_RX) { - queue = (status >> 8) & 7; - if (napi_schedule_prep(&priv->napi)) { - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); - __napi_schedule(&priv->napi); - } - } - - cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); - - if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) - cpmac_check_status(dev); - - return IRQ_HANDLED; -} - -static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - spin_lock(&priv->lock); - dev->stats.tx_errors++; - spin_unlock(&priv->lock); - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "transmit timeout\n"); - - atomic_inc(&priv->reset_pending); - barrier(); - cpmac_clear_tx(dev); - barrier(); - atomic_dec(&priv->reset_pending); - - netif_tx_wake_all_queues(priv->dev); -} - -static void cpmac_get_ringparam(struct net_device *dev, - struct 
ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - ring->rx_max_pending = 1024; - ring->rx_mini_max_pending = 1; - ring->rx_jumbo_max_pending = 1; - ring->tx_max_pending = 1; - - ring->rx_pending = priv->ring_size; - ring->rx_mini_pending = 1; - ring->rx_jumbo_pending = 1; - ring->tx_pending = 1; -} - -static int cpmac_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (netif_running(dev)) - return -EBUSY; - priv->ring_size = ring->rx_pending; - - return 0; -} - -static void cpmac_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strscpy(info->driver, "cpmac", sizeof(info->driver)); - strscpy(info->version, CPMAC_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); -} - -static const struct ethtool_ops cpmac_ethtool_ops = { - .get_drvinfo = cpmac_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = cpmac_get_ringparam, - .set_ringparam = cpmac_set_ringparam, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static void cpmac_adjust_link(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - int new_state = 0; - - spin_lock(&priv->lock); - if (dev->phydev->link) { - netif_tx_start_all_queues(dev); - if (dev->phydev->duplex != priv->oldduplex) { - new_state = 1; - priv->oldduplex = dev->phydev->duplex; - } - - if (dev->phydev->speed != priv->oldspeed) { - new_state = 1; - priv->oldspeed = dev->phydev->speed; - } - - if (!priv->oldlink) { - new_state = 1; - priv->oldlink = 1; - } - } else if (priv->oldlink) { - new_state = 1; - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - } - - if (new_state && netif_msg_link(priv) && net_ratelimit()) - phy_print_status(dev->phydev); - - spin_unlock(&priv->lock); -} - -static int cpmac_open(struct net_device *dev) -{ - int i, size, res; - struct cpmac_priv *priv = netdev_priv(dev); - struct resource *mem; - struct cpmac_desc *desc; - struct sk_buff *skb; - - mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to request registers\n"); - - res = -ENXIO; - goto fail_reserve; - } - - priv->regs = ioremap(mem->start, resource_size(mem)); - if (!priv->regs) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to remap registers\n"); - - res = -ENXIO; - goto fail_remap; - } - - size = priv->ring_size + CPMAC_QUEUES; - priv->desc_ring = dma_alloc_coherent(&dev->dev, - sizeof(struct cpmac_desc) * size, - &priv->dma_ring, - GFP_KERNEL); - if (!priv->desc_ring) { - res = -ENOMEM; - goto fail_alloc; - } - - for (i = 0; i < size; i++) - priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; - - priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; - for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { - skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); - if (unlikely(!skb)) { - res = -ENOMEM; - goto fail_desc; - } - desc->skb = skb; - desc->data_mapping = dma_map_single(&dev->dev, skb->data, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - desc->buflen = CPMAC_SKB_SIZE; - desc->dataflags = CPMAC_OWN; - desc->next = 
&priv->rx_head[(i + 1) % priv->ring_size]; - desc->next->prev = desc; - desc->hw_next = (u32)desc->next->mapping; - } - - priv->rx_head->prev->hw_next = (u32)0; - - res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); - if (res) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to obtain irq\n"); - - goto fail_irq; - } - - atomic_set(&priv->reset_pending, 0); - INIT_WORK(&priv->reset_work, cpmac_hw_error); - cpmac_hw_start(dev); - - napi_enable(&priv->napi); - phy_start(dev->phydev); - - return 0; - -fail_irq: -fail_desc: - for (i = 0; i < priv->ring_size; i++) { - if (priv->rx_head[i].skb) { - dma_unmap_single(&dev->dev, - priv->rx_head[i].data_mapping, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_head[i].skb); - } - } - dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, - priv->desc_ring, priv->dma_ring); - -fail_alloc: - iounmap(priv->regs); - -fail_remap: - release_mem_region(mem->start, resource_size(mem)); - -fail_reserve: - return res; -} - -static int cpmac_stop(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct resource *mem; - - netif_tx_stop_all_queues(dev); - - cancel_work_sync(&priv->reset_work); - napi_disable(&priv->napi); - phy_stop(dev->phydev); - - cpmac_hw_stop(dev); - - for (i = 0; i < 8; i++) - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); - cpmac_write(priv->regs, CPMAC_MBP, 0); - - free_irq(dev->irq, dev); - iounmap(priv->regs); - mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - release_mem_region(mem->start, resource_size(mem)); - priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; - for (i = 0; i < priv->ring_size; i++) { - if (priv->rx_head[i].skb) { - dma_unmap_single(&dev->dev, - priv->rx_head[i].data_mapping, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_head[i].skb); - } - } - - dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * - (CPMAC_QUEUES + priv->ring_size), - priv->desc_ring, priv->dma_ring); - - return 0; -} - -static const struct net_device_ops cpmac_netdev_ops = { - .ndo_open = cpmac_open, - .ndo_stop = cpmac_stop, - .ndo_start_xmit = cpmac_start_xmit, - .ndo_tx_timeout = cpmac_tx_timeout, - .ndo_set_rx_mode = cpmac_set_multicast_list, - .ndo_eth_ioctl = phy_do_ioctl_running, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int external_switch; - -static int cpmac_probe(struct platform_device *pdev) -{ - int rc, phy_id; - char mdio_bus_id[MII_BUS_ID_SIZE]; - struct resource *mem; - struct cpmac_priv *priv; - struct net_device *dev; - struct plat_cpmac_data *pdata; - struct phy_device *phydev = NULL; - - pdata = dev_get_platdata(&pdev->dev); - - if (external_switch || dumb_switch) { - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ - phy_id = pdev->id; - } else { - for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { - if (!(pdata->phy_mask & (1 << phy_id))) - continue; - if (!mdiobus_get_phy(cpmac_mii, phy_id)) - continue; - strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); - break; - } - } - - if (phy_id == PHY_MAX_ADDR) { - dev_err(&pdev->dev, "no PHY present, falling back " - "to switch on MDIO bus 0\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ - phy_id = pdev->id; - } - mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; - - dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); - if (!dev) - return -ENOMEM; - - SET_NETDEV_DEV(dev, &pdev->dev); - platform_set_drvdata(pdev, dev); - 
priv = netdev_priv(dev); - - priv->pdev = pdev; - mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - if (!mem) { - rc = -ENODEV; - goto fail; - } - - dev->irq = platform_get_irq_byname(pdev, "irq"); - - dev->netdev_ops = &cpmac_netdev_ops; - dev->ethtool_ops = &cpmac_ethtool_ops; - - netif_napi_add(dev, &priv->napi, cpmac_poll); - - spin_lock_init(&priv->lock); - spin_lock_init(&priv->rx_lock); - priv->dev = dev; - priv->ring_size = 64; - priv->msg_enable = netif_msg_init(debug_level, 0xff); - eth_hw_addr_set(dev, pdata->dev_addr); - - snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, - mdio_bus_id, phy_id); - - phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link, - PHY_INTERFACE_MODE_MII); - - if (IS_ERR(phydev)) { - if (netif_msg_drv(priv)) - dev_err(&pdev->dev, "Could not attach to PHY\n"); - - rc = PTR_ERR(phydev); - goto fail; - } - - rc = register_netdev(dev); - if (rc) { - dev_err(&pdev->dev, "Could not register net device\n"); - goto fail; - } - - if (netif_msg_probe(priv)) { - dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " - "mac: %pM\n", (void *)mem->start, dev->irq, - priv->phy_name, dev->dev_addr); - } - - return 0; - -fail: - free_netdev(dev); - return rc; -} - -static int cpmac_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - unregister_netdev(dev); - free_netdev(dev); - - return 0; -} - -static struct platform_driver cpmac_driver = { - .driver = { - .name = "cpmac", - }, - .probe = cpmac_probe, - .remove = cpmac_remove, -}; - -int __init cpmac_init(void) -{ - u32 mask; - int i, res; - - cpmac_mii = mdiobus_alloc(); - if (cpmac_mii == NULL) - return -ENOMEM; - - cpmac_mii->name = "cpmac-mii"; - cpmac_mii->read = cpmac_mdio_read; - cpmac_mii->write = cpmac_mdio_write; - cpmac_mii->reset = cpmac_mdio_reset; - - cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); - - if (!cpmac_mii->priv) { - pr_err("Can't ioremap mdio registers\n"); - res = -ENXIO; - goto fail_alloc; - } - - /* FIXME: unhardcode gpio&reset bits */ - ar7_gpio_disable(26); - ar7_gpio_disable(27); - ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); - ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); - ar7_device_reset(AR7_RESET_BIT_EPHY); - - cpmac_mii->reset(cpmac_mii); - - for (i = 0; i < 300; i++) { - mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); - if (mask) - break; - else - msleep(10); - } - - mask &= 0x7fffffff; - if (mask & (mask - 1)) { - external_switch = 1; - mask = 0; - } - - cpmac_mii->phy_mask = ~(mask | 0x80000000); - snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1"); - - res = mdiobus_register(cpmac_mii); - if (res) - goto fail_mii; - - res = platform_driver_register(&cpmac_driver); - if (res) - goto fail_cpmac; - - return 0; - -fail_cpmac: - mdiobus_unregister(cpmac_mii); - -fail_mii: - iounmap(cpmac_mii->priv); - -fail_alloc: - mdiobus_free(cpmac_mii); - - return res; -} - -void __exit cpmac_exit(void) -{ - platform_driver_unregister(&cpmac_driver); - mdiobus_unregister(cpmac_mii); - iounmap(cpmac_mii->priv); - mdiobus_free(cpmac_mii); -} - -module_init(cpmac_init); -module_exit(cpmac_exit); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ca4d4548f8..2ed165dcdb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_new.c 
b/drivers/net/ethernet/ti/cpsw_new.c index 0e4f526b17..9061dca97f 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->slave_num); return; } + + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 0ec85635df..764ed298b5 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, * particular hardware is sharing a common queue, so the * incoming device might change per packet. */ - xdp_do_flush_map(); + xdp_do_flush(); break; default: bpf_warn_invalid_xdp_action(ndev, prog, act); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2eb9d5a325..b0950a318c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -47,10 +48,7 @@ #include #include #include -#include -#include #include -#include #include #include @@ -1726,13 +1724,10 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; -static const struct of_device_id davinci_emac_of_match[]; - static struct emac_platform_data * davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; - const struct of_device_id *match; const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; @@ -1779,9 +1774,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) pdata->interrupt_disable = auxdata->interrupt_disable; } - match = of_match_device(davinci_emac_of_match, &pdev->dev); - if (match && match->data) { - auxdata = match->data; + auxdata = device_get_match_data(&pdev->dev); + if (auxdata) { pdata->version = auxdata->version; pdata->hw_ram_addr = auxdata->hw_ram_addr; } @@ -1934,18 +1928,20 @@ static int davinci_emac_probe(struct platform_device *pdev) goto err_free_rxchan; ndev->irq = rc; - rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); - if (!rc) - eth_hw_addr_set(ndev, priv->mac_addr); - + /* If the MAC address is not present, read the registers from the SoC */ if (!is_valid_ether_addr(priv->mac_addr)) { - /* Use random MAC if still none obtained. */ - eth_hw_addr_random(ndev); - memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); - dev_warn(&pdev->dev, "using random MAC addr: %pM\n", - priv->mac_addr); + rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); + if (!rc) + eth_hw_addr_set(ndev, priv->mac_addr); + + if (!is_valid_ether_addr(priv->mac_addr)) { + /* Use random MAC if still none obtained. */ + eth_hw_addr_random(ndev); + memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); + dev_warn(&pdev->dev, "using random MAC addr: %pM\n", + priv->mac_addr); + } } - ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = ðtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll); @@ -2002,7 +1998,7 @@ err_free_netdev: * Called when removing the device driver. 
We disable clock usage and release * the resources taken up by the driver and unregister network device */ -static int davinci_emac_remove(struct platform_device *pdev) +static void davinci_emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); @@ -2022,8 +2018,6 @@ static int davinci_emac_remove(struct platform_device *pdev) if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); free_netdev(ndev); - - return 0; } static int davinci_emac_suspend(struct device *dev) @@ -2076,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = { .of_match_table = davinci_emac_of_match, }, .probe = davinci_emac_probe, - .remove = davinci_emac_remove, + .remove_new = davinci_emac_remove, }; /** diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 89b6d23e99..628c87dc1d 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -673,7 +673,7 @@ bail_out: return ret; } -static int davinci_mdio_remove(struct platform_device *pdev) +static void davinci_mdio_remove(struct platform_device *pdev) { struct davinci_mdio_data *data = platform_get_drvdata(pdev); @@ -686,8 +686,6 @@ static int davinci_mdio_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); - - return 0; } #ifdef CONFIG_PM @@ -766,7 +764,7 @@ static struct platform_driver davinci_mdio_driver = { .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, - .remove = davinci_mdio_remove, + .remove_new = davinci_mdio_remove, }; static int __init davinci_mdio_init(void) diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index b272361e37..99de8a40ed 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -433,6 +433,17 @@ int emac_set_port_state(struct prueth_emac *emac, return ret; } +void icssg_config_half_duplex(struct prueth_emac *emac) +{ + u32 val; + + if (!emac->half_duplex) + return; + + val = get_random_u32(); + writel(val, emac->dram.va + HD_RAND_SEED_OFFSET); +} + void icssg_config_set_speed(struct prueth_emac *emac) { u8 fw_speed; @@ -453,5 +464,8 @@ void icssg_config_set_speed(struct prueth_emac *emac) return; } + if (emac->duplex == DUPLEX_HALF) + fw_speed |= FW_LINK_SPEED_HD; + writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET); } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index c09ecb3da7..411898a4f3 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -19,11 +19,11 @@ #include #include #include -#include #include #include -#include +#include #include +#include #include #include #include @@ -1029,6 +1029,8 @@ static void emac_adjust_link(struct net_device *ndev) * values */ if (emac->link) { + if (emac->duplex == DUPLEX_HALF) + icssg_config_half_duplex(emac); /* Set the RGMII cfg for gig en and full duplex */ icssg_update_rgmii_cfg(prueth->miig_rt, emac); @@ -1147,9 +1149,13 @@ static int emac_phy_connect(struct prueth_emac *emac) return -ENODEV; } + if (!emac->half_duplex) { + dev_dbg(prueth->dev, "half duplex mode is not supported\n"); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + } + /* remove unsupported modes */ - 
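/*
 * Illustrative sketch (not part of the patch): the davinci_emac hunk above and
 * the icssg-prueth hunk below both drop of_match_device() and the forward
 * declaration of the match table, using device_get_match_data() to fetch the
 * matched entry's .data directly. The "foo" names and the -ENODEV handling
 * below are illustrative only.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mod_devicetable.h>

struct foo_pdata {
	unsigned int version;
};

static const struct foo_pdata foo_v1_pdata = { .version = 1 };

static const struct of_device_id foo_dt_match[] = {
	{ .compatible = "vendor,foo", .data = &foo_v1_pdata },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_pdata *pdata;

	/* Replaces the two-step of_match_device()/match->data lookup. */
	pdata = device_get_match_data(&pdev->dev);
	if (!pdata)
		return -ENODEV;

	dev_info(&pdev->dev, "probing hardware version %u\n", pdata->version);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo-example",
		.of_match_table	= foo_dt_match,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");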
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT); phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); @@ -1653,6 +1659,19 @@ static void emac_ndo_get_stats64(struct net_device *ndev, stats->tx_dropped = ndev->stats.tx_dropped; } +static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + + ret = snprintf(name, len, "p%d", emac->port_id); + if (ret >= len) + return -EINVAL; + + return 0; +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -1663,6 +1682,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_set_rx_mode = emac_ndo_set_rx_mode, .ndo_eth_ioctl = emac_ndo_ioctl, .ndo_get_stats64 = emac_ndo_get_stats64, + .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, }; /* get emac_port corresponding to eth_node name */ @@ -1928,8 +1948,6 @@ static void prueth_put_cores(struct prueth *prueth, int slice) pru_rproc_put(prueth->pru[slice]); } -static const struct of_device_id prueth_dt_match[]; - static int prueth_probe(struct platform_device *pdev) { struct device_node *eth_node, *eth_ports_node; @@ -1938,7 +1956,6 @@ static int prueth_probe(struct platform_device *pdev) struct genpool_data_align gp_data = { .align = SZ_64K, }; - const struct of_device_id *match; struct device *dev = &pdev->dev; struct device_node *np; struct prueth *prueth; @@ -1948,17 +1965,13 @@ static int prueth_probe(struct platform_device *pdev) np = dev->of_node; - match = of_match_device(prueth_dt_match, dev); - if (!match) - return -ENODEV; - prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL); if (!prueth) return -ENOMEM; dev_set_drvdata(dev, prueth); prueth->pdev = pdev; - prueth->pdata = *(const struct prueth_pdata *)match->data; + prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev); prueth->dev = dev; eth_ports_node = of_get_child_by_name(np, "ethernet-ports"); @@ -2110,6 +2123,10 @@ static int prueth_probe(struct platform_device *pdev) eth0_node->name); goto exit_iep; } + + if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC0]->half_duplex = 1; + prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; } @@ -2121,6 +2138,9 @@ static int prueth_probe(struct platform_device *pdev) goto netdev_exit; } + if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC1]->half_duplex = 1; + prueth->emac[PRUETH_MAC1]->iep = prueth->iep0; } @@ -2318,8 +2338,13 @@ static const struct prueth_pdata am654_icssg_pdata = { .quirk_10m_link_issue = 1, }; +static const struct prueth_pdata am64x_icssg_pdata = { + .fdqring_mode = K3_RINGACC_RING_MODE_RING, +}; + static const struct of_device_id prueth_dt_match[] = { { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata }, + { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, prueth_dt_match); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 3fe80a8758..8b6d6b4970 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -145,6 +145,7 @@ struct prueth_emac { struct icss_iep *iep; unsigned int 
rx_ts_enabled : 1; unsigned int tx_ts_enabled : 1; + unsigned int half_duplex : 1; /* DMA related */ struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES]; @@ -271,6 +272,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int emac_set_port_state(struct prueth_emac *emac, enum icssg_port_state_cmd state); void icssg_config_set_speed(struct prueth_emac *emac); +void icssg_config_half_duplex(struct prueth_emac *emac); /* Buffer queue helpers */ int icssg_queue_pop(struct prueth *prueth, u8 queue); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index d829113c16..11b90e1da0 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2228,7 +2228,7 @@ probe_quit: return ret; } -static int netcp_remove(struct platform_device *pdev) +static void netcp_remove(struct platform_device *pdev) { struct netcp_device *netcp_device = platform_get_drvdata(pdev); struct netcp_intf *netcp_intf, *netcp_tmp; @@ -2256,7 +2256,6 @@ static int netcp_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); - return 0; } static const struct of_device_id of_match[] = { @@ -2271,7 +2270,7 @@ static struct platform_driver netcp_driver = { .of_match_table = of_match, }, .probe = netcp_probe, - .remove = netcp_remove, + .remove_new = netcp_remove, }; module_platform_driver(netcp_driver); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2adf82a32b..02cb6474f6 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1735,8 +1735,8 @@ static const struct netcp_ethtool_stat xgbe10_et_stats[] = { static void keystone_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); + strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); + strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); } static u32 keystone_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 14cf6ecf6d..6e3758dfbd 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -1434,14 +1434,10 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) u32 dmactl = tc_readl(&tr->DMA_Ctl); if (!(dmactl & DMA_IntMask)) { - /* disable interrupts */ - tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); - if (napi_schedule_prep(&lp->napi)) + if (napi_schedule_prep(&lp->napi)) { + /* disable interrupts */ + tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); __napi_schedule(&lp->napi); - else { - printk(KERN_ERR "%s: interrupt taken in poll\n", - dev->name); - BUG(); } (void)tc_readl(&tr->Int_Src); /* flush */ return IRQ_HANDLED; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index d09d352e1c..554aff7c8f 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1660,7 +1660,7 @@ static void tsi108_timed_checker(struct timer_list *t) mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); } -static int tsi108_ether_remove(struct platform_device *pdev) +static void tsi108_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct tsi108_prv_data *priv = netdev_priv(dev); @@ -1670,15 
+1670,13 @@ static int tsi108_ether_remove(struct platform_device *pdev) iounmap(priv->regs); iounmap(priv->phyregs); free_netdev(dev); - - return 0; } /* Structure for a device driver */ static struct platform_driver tsi_eth_driver = { .probe = tsi108_init_one, - .remove = tsi108_ether_remove, + .remove_new = tsi108_ether_remove, .driver = { .name = "tsi-ethernet", }, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 3e09e50364..e80c029488 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2443,7 +2443,7 @@ static void rhine_remove_one_pci(struct pci_dev *pdev) pci_disable_device(pdev); } -static int rhine_remove_one_platform(struct platform_device *pdev) +static void rhine_remove_one_platform(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct rhine_private *rp = netdev_priv(dev); @@ -2453,8 +2453,6 @@ static int rhine_remove_one_platform(struct platform_device *pdev) iounmap(rp->base); free_netdev(dev); - - return 0; } static void rhine_shutdown_pci(struct pci_dev *pdev) @@ -2572,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = { static struct platform_driver rhine_driver_platform = { .probe = rhine_init_one_platform, - .remove = rhine_remove_one_platform, + .remove_new = rhine_remove_one_platform, .driver = { .name = DRV_NAME, .of_match_table = rhine_of_tbl, diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 731f689412..1c6b2a9bba 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2957,11 +2957,9 @@ static int velocity_platform_probe(struct platform_device *pdev) return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); } -static int velocity_platform_remove(struct platform_device *pdev) +static void velocity_platform_remove(struct platform_device *pdev) { velocity_remove(&pdev->dev); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -3249,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = { static struct platform_driver velocity_platform_driver = { .probe = velocity_platform_probe, - .remove = velocity_platform_remove, + .remove_new = velocity_platform_remove, .driver = { .name = "via-velocity", .of_match_table = velocity_of_ids, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294..ddc5f6d20b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,171 @@ #include #include +#include #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", stats.rdmdrop), + 
WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* drivers allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +176,12 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c694..16d1a09369 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,13 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 52130df26a..533e912af0 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command 
= WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; @@ -1911,6 +2003,105 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +/** + * wx_update_stats - Update the board statistics counters. 
+ * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + /** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 0b3447bc6f..12c20a7c36 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); @@ -34,5 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void wx_update_stats(struct wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index e078f4071d..347d3cec02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -421,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -654,6 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -809,9 +811,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -888,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1465,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -2595,8 +2602,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2632,6 +2642,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index c555af9ed5..83f9bb7b3c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -59,6 +59,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 +#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +113,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +128,8 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -218,6 +242,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -251,6 +277,7 @@ enum WX_MSCA_CMD_value { #define WX_MSCC_SADDR BIT(18) #define WX_MSCC_BUSY BIT(22) #define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 
0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ @@ -300,6 +327,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) @@ -766,9 +794,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -812,6 +847,7 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -843,6 +879,33 @@ enum wx_isb_idx { WX_ISB_MAX }; +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -918,6 +981,14 @@ struct wx { u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) @@ -951,6 +1022,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. 
*/ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9a..afbdf69190 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -49,6 +49,11 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de95..6459bc1d7c 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a4d63d2f3c..8db804543e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -330,6 +330,8 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } static void ngbe_down(struct wx *wx) @@ -675,11 +677,6 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 591f5b7b6d..6302ecca71 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 
100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } @@ -262,8 +151,8 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d55..ff754d69bd 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,9 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. 
*/ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da11258..3f336a088e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -39,6 +39,11 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 3727452502..d6b2b3c781 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -70,114 +70,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) wr32(wx, WX_TS_DALARM_THRE, 614); } -/** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. - **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA 
string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure @@ -306,6 +198,8 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb18..1f3ecf60e3 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -6,7 +6,6 @@ int txgbe_disable_sec_tx_path(struct wx *wx); void txgbe_enable_sec_tx_path(struct wx *wx); -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index d60c26ba0b..526250102d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -286,6 +286,8 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } static void txgbe_down(struct wx *wx) @@ -536,7 +538,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -734,13 +735,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 4159c84035..b6c06adb86 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -647,58 +647,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } -static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, - int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, 
command; - u16 val; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int txgbe_ext_phy_init(struct txgbe *txgbe) { struct phy_device *phydev; @@ -715,8 +663,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) return -ENOMEM; mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = &txgbe_phy_read; - mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f..3ba9ce43f3 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,9 +95,6 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 7c52796273..990a3cce8c 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -14,8 +14,8 @@ #include #include #include +#include #include -#include #include #include "w5100.h" @@ -420,7 +420,6 @@ MODULE_DEVICE_TABLE(of, w5100_of_match); static int w5100_spi_probe(struct spi_device *spi) { - const struct of_device_id *of_id; const struct w5100_ops *ops; kernel_ulong_t driver_data; const void *mac = NULL; @@ -432,14 +431,7 @@ static int w5100_spi_probe(struct spi_device *spi) if (!ret) mac = tmpmac; - if (spi->dev.of_node) { - of_id = of_match_device(w5100_of_match, &spi->dev); - if (!of_id) - return -ENODEV; - driver_data = (kernel_ulong_t)of_id->data; - } else { - driver_data = spi_get_device_id(spi)->driver_data; - } + driver_data = (uintptr_t)spi_get_device_match_data(spi); switch (driver_data) { case W5100: diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 634946e87e..b26fd15c25 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -930,8 +930,8 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) if (priv->ops->may_sleep) queue_work(priv->xfer_wq, &priv->rx_work); - else if (napi_schedule_prep(&priv->napi)) - __napi_schedule(&priv->napi); + else + napi_schedule(&priv->napi); } return IRQ_HANDLED; @@ -1062,11 +1062,9 @@ static int w5100_mmio_probe(struct platform_device *pdev) mac_addr, irq, data ? 
data->link_gpio : -EINVAL); } -static int w5100_mmio_remove(struct platform_device *pdev) +static void w5100_mmio_remove(struct platform_device *pdev) { w5100_remove(&pdev->dev); - - return 0; } void *w5100_ops_priv(const struct net_device *ndev) @@ -1273,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = { .pm = &w5100_pm_ops, }, .probe = w5100_mmio_probe, - .remove = w5100_mmio_remove, + .remove_new = w5100_mmio_remove, }; module_platform_driver(w5100_mmio_driver); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index b0958fe811..3318b50a59 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -627,7 +627,7 @@ err_register: return err; } -static int w5300_remove(struct platform_device *pdev) +static void w5300_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct w5300_priv *priv = netdev_priv(ndev); @@ -639,7 +639,6 @@ static int w5300_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - return 0; } #ifdef CONFIG_PM_SLEEP @@ -683,7 +682,7 @@ static struct platform_driver w5300_driver = { .pm = &w5300_pm_ops, }, .probe = w5300_probe, - .remove = w5300_remove, + .remove_new = w5300_remove, }; module_platform_driver(w5300_driver); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1444b855e7..9df39cf8b0 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1626,7 +1626,7 @@ err_sysfs_create: return rc; } -static int temac_remove(struct platform_device *pdev) +static void temac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct temac_local *lp = netdev_priv(ndev); @@ -1636,7 +1636,6 @@ static int temac_remove(struct platform_device *pdev) if (lp->phy_node) of_node_put(lp->phy_node); temac_mdio_teardown(lp); - return 0; } static const struct of_device_id temac_of_match[] = { @@ -1650,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match); static struct platform_driver temac_driver = { .probe = temac_probe, - .remove = temac_remove, + .remove_new = temac_remove, .driver = { .name = "xilinx_temac", .of_match_table = temac_of_match, diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 3297aff969..bf6e339904 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2183,7 +2183,7 @@ free_netdev: return ret; } -static int axienet_remove(struct platform_device *pdev) +static void axienet_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct axienet_local *lp = netdev_priv(ndev); @@ -2202,8 +2202,6 @@ static int axienet_remove(struct platform_device *pdev) clk_disable_unprepare(lp->axi_clk); free_netdev(ndev); - - return 0; } static void axienet_shutdown(struct platform_device *pdev) @@ -2256,7 +2254,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, static struct platform_driver axienet_driver = { .probe = axienet_probe, - .remove = axienet_remove, + .remove_new = axienet_remove, .shutdown = axienet_shutdown, .driver = { .name = "xilinx_axienet", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b358ecc672..765aa516aa 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ 
-1180,10 +1180,8 @@ error: * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees any resources allocated to * the device. - * - * Return: 0, always. */ -static int xemaclite_of_remove(struct platform_device *of_dev) +static void xemaclite_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); @@ -1202,8 +1200,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) lp->phy_node = NULL; free_netdev(ndev); - - return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1262,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = { .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, - .remove = xemaclite_of_remove, + .remove_new = xemaclite_of_remove, }; module_platform_driver(xemaclite_of_driver); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 3b0c5f1774..e0d26148df 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,15 @@ #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) #define REGS_SIZE 0x1000 -#define MAX_MRU 1536 /* 0x600 */ + +/* MRU is said to be 14320 in a code dump, the SW manual says that + * MRU/MTU is 16320 and includes VLAN and ethernet headers. + * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161. + * + * FIXME: we have chosen the safe default (14320) but if you can test + * jumboframes, experiment with 16320 and see what happens! + */ +#define MAX_MRU (14320 - VLAN_ETH_HLEN) #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) #define NAPI_WEIGHT 16 @@ -154,7 +163,6 @@ typedef void buffer_t; /* Information about built-in Ethernet MAC interfaces */ struct eth_plat_info { - u8 phy; /* MII PHY ID, 0 - 31 */ u8 rxq; /* configurable, currently 0 - 31 only */ u8 txreadyq; u8 hwaddr[ETH_ALEN]; @@ -714,9 +722,9 @@ static int eth_poll(struct napi_struct *napi, int budget) napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_below_low_watermark(rxq) && - napi_reschedule(napi)) { /* not empty again */ + napi_schedule(napi)) { /* not empty again */ #if DEBUG_RX - netdev_debug(dev, "eth_poll napi_reschedule succeeded\n"); + netdev_debug(dev, "eth_poll napi_schedule succeeded\n"); #endif qmgr_disable_irq(rxq); continue; @@ -1182,6 +1190,54 @@ static void destroy_queues(struct port *port) } } +static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu) +{ + struct port *port = netdev_priv(dev); + struct npe *npe = port->npe; + int framesize, chunks; + struct msg msg = {}; + + /* adjust for ethernet headers */ + framesize = new_mtu + VLAN_ETH_HLEN; + /* max rx/tx 64 byte chunks */ + chunks = DIV_ROUND_UP(framesize, 64); + + msg.cmd = NPE_SETMAXFRAMELENGTHS; + msg.eth_id = port->id; + + /* Firmware wants to know buffer size in 64 byte chunks */ + msg.byte2 = chunks << 8; + msg.byte3 = chunks << 8; + + msg.byte4 = msg.byte6 = framesize >> 8; + msg.byte5 = msg.byte7 = framesize & 0xff; + + if (npe_send_recv_message(npe, &msg, "ETH_SET_MAX_FRAME_LENGTH")) + return -EIO; + netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n", + npe_name(npe), new_mtu); + + return 0; +} + +static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + /* MTU can only be changed when the interface is up. We also + * set the MTU from dev->mtu when opening the device. 
+ */ + if (dev->flags & IFF_UP) { + ret = ixp4xx_do_change_mtu(dev, new_mtu); + if (ret < 0) + return ret; + } + + dev->mtu = new_mtu; + + return 0; +} + static int eth_open(struct net_device *dev) { struct port *port = netdev_priv(dev); @@ -1232,6 +1288,8 @@ static int eth_open(struct net_device *dev) if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) return -EIO; + ixp4xx_do_change_mtu(dev, dev->mtu); + if ((err = request_queues(port)) != 0) return err; @@ -1374,6 +1432,7 @@ static int eth_close(struct net_device *dev) static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_open = eth_open, .ndo_stop = eth_close, + .ndo_change_mtu = ixp4xx_eth_change_mtu, .ndo_start_xmit = eth_xmit, .ndo_set_rx_mode = eth_set_mcast_list, .ndo_eth_ioctl = eth_ioctl, @@ -1488,6 +1547,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) ndev->dev.dma_mask = dev->dma_mask; ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; + ndev->min_mtu = ETH_MIN_MTU; + ndev->max_mtu = MAX_MRU; + netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT); if (!(port->npe = npe_request(NPE_ID(port->id)))) @@ -1520,7 +1582,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) if ((err = register_netdev(ndev))) goto err_phy_dis; - netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy, + netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev), npe_name(port->npe)); return 0; @@ -1533,7 +1595,7 @@ err_free_mem: return err; } -static int ixp4xx_eth_remove(struct platform_device *pdev) +static void ixp4xx_eth_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct phy_device *phydev = ndev->phydev; @@ -1544,7 +1606,6 @@ static int ixp4xx_eth_remove(struct platform_device *pdev) ixp4xx_mdio_remove(); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); - return 0; } static const struct of_device_id ixp4xx_eth_of_match[] = { @@ -1560,7 +1621,7 @@ static struct platform_driver ixp4xx_eth_driver = { .of_match_table = of_match_ptr(ixp4xx_eth_of_match), }, .probe = ixp4xx_eth_probe, - .remove = ixp4xx_eth_remove, + .remove_new = ixp4xx_eth_remove, }; module_platform_driver(ixp4xx_eth_driver); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 2513be6d4e..cd8cf08477 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1030,7 +1030,7 @@ static int fjes_poll(struct napi_struct *napi, int budget) } if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) { - napi_reschedule(napi); + napi_schedule(napi); } else { spin_lock(&hw->rx_status_lock); for (epidx = 0; epidx < hw->max_epid; epidx++) { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 78f9d588f7..acd9c615d1 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -784,117 +784,21 @@ free_dst: return err; } -static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, - struct net_device *dev, - struct geneve_sock *gs4, - struct flowi4 *fl4, - const struct ip_tunnel_info *info, - __be16 dport, __be16 sport, - __u8 *full_tos) +static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev, + const struct ip_tunnel_info *info, + bool *use_cache) { - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); - struct dst_cache *dst_cache; - struct rtable *rt = NULL; - __u8 tos; + u8 dsfield; - if (!gs4) - return ERR_PTR(-EIO); - - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_mark = skb->mark; - fl4->flowi4_proto = IPPROTO_UDP; 
- fl4->daddr = info->key.u.ipv4.dst; - fl4->saddr = info->key.u.ipv4.src; - fl4->fl4_dport = dport; - fl4->fl4_sport = sport; - fl4->flowi4_flags = info->key.flow_flags; - - tos = info->key.tos; - if ((tos == 1) && !geneve->cfg.collect_md) { - tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb); - use_cache = false; - } - fl4->flowi4_tos = RT_TOS(tos); - if (full_tos) - *full_tos = tos; - - dst_cache = (struct dst_cache *)&info->dst_cache; - if (use_cache) { - rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); - if (rt) - return rt; - } - rt = ip_route_output_key(geneve->net, fl4); - if (IS_ERR(rt)) { - netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); - return ERR_PTR(-ENETUNREACH); - } - if (rt->dst.dev == dev) { /* is this necessary? */ - netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); - ip_rt_put(rt); - return ERR_PTR(-ELOOP); - } - if (use_cache) - dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); - return rt; -} - -#if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, - struct net_device *dev, - struct geneve_sock *gs6, - struct flowi6 *fl6, - const struct ip_tunnel_info *info, - __be16 dport, __be16 sport) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct geneve_dev *geneve = netdev_priv(dev); - struct dst_entry *dst = NULL; - struct dst_cache *dst_cache; - __u8 prio; - - if (!gs6) - return ERR_PTR(-EIO); - - memset(fl6, 0, sizeof(*fl6)); - fl6->flowi6_mark = skb->mark; - fl6->flowi6_proto = IPPROTO_UDP; - fl6->daddr = info->key.u.ipv6.dst; - fl6->saddr = info->key.u.ipv6.src; - fl6->fl6_dport = dport; - fl6->fl6_sport = sport; - - prio = info->key.tos; - if ((prio == 1) && !geneve->cfg.collect_md) { - prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); - use_cache = false; - } - - fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); - dst_cache = (struct dst_cache *)&info->dst_cache; - if (use_cache) { - dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); - if (dst) - return dst; - } - dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, - NULL); - if (IS_ERR(dst)) { - netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); - return ERR_PTR(-ENETUNREACH); - } - if (dst->dev == dev) { /* is this necessary? */ - netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr); - dst_release(dst); - return ERR_PTR(-ELOOP); + dsfield = info->key.tos; + if (dsfield == 1 && !geneve->cfg.collect_md) { + dsfield = ip_tunnel_get_dsfield(ip_hdr(skb), skb); + *use_cache = false; } - if (use_cache) - dst_cache_set_ip6(dst_cache, dst, &fl6->saddr); - return dst; + return dsfield; } -#endif static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, @@ -904,19 +808,28 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; - struct flowi4 fl4; - __u8 full_tos; + bool use_cache; __u8 tos, ttl; __be16 df = 0; + __be32 saddr; __be16 sport; int err; if (!pskb_inet_may_pull(skb)) return -EINVAL; + if (!gs4) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport, &full_tos); + + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, + &info->key, + sport, geneve->cfg.info.key.tp_dst, tos, + use_cache ? 
+ (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -939,8 +852,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -ENOMEM; } - unclone->key.u.ipv4.dst = fl4.saddr; - unclone->key.u.ipv4.src = fl4.daddr; + unclone->key.u.ipv4.dst = saddr; + unclone->key.u.ipv4.src = info->key.u.ipv4.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -954,13 +867,12 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } + tos = ip_tunnel_ecn_encap(tos, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { - tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { - tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else @@ -988,7 +900,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) return err; - udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, + udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), !(info->key.tun_flags & TUNNEL_CSUM)); @@ -1004,7 +916,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; - struct flowi6 fl6; + struct in6_addr saddr; + bool use_cache; __u8 prio, ttl; __be16 sport; int err; @@ -1012,9 +925,18 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!pskb_inet_may_pull(skb)) return -EINVAL; + if (!gs6) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, - geneve->cfg.info.key.tp_dst, sport); + + dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, + &saddr, key, sport, + geneve->cfg.info.key.tp_dst, prio, + use_cache ? 
+ (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1036,8 +958,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -ENOMEM; } - unclone->key.u.ipv6.dst = fl6.saddr; - unclone->key.u.ipv6.src = fl6.daddr; + unclone->key.u.ipv6.dst = saddr; + unclone->key.u.ipv6.src = info->key.u.ipv6.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -1051,12 +973,10 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } + prio = ip_tunnel_ecn_encap(prio, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { - prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; } else { - prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), - ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else @@ -1069,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return err; udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, - &fl6.saddr, &fl6.daddr, prio, ttl, + &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, !(info->key.tun_flags & TUNNEL_CSUM)); return 0; @@ -1137,35 +1057,54 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; - struct flowi4 fl4; - struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + bool use_cache; + __be32 saddr; + u8 tos; + + if (!gs4) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport, NULL); + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, + &info->key, + sport, geneve->cfg.info.key.tp_dst, + tos, + use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); - info->key.u.ipv4.src = fl4.saddr; + info->key.u.ipv4.src = saddr; #if IS_ENABLED(CONFIG_IPV6) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; - struct flowi6 fl6; - struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + struct in6_addr saddr; + bool use_cache; + u8 prio; + + if (!gs6) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, - geneve->cfg.info.key.tp_dst, sport); + dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, + &saddr, &info->key, sport, + geneve->cfg.info.key.tp_dst, prio, + use_cache ? 
&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); - info->key.u.ipv6.src = fl6.saddr; + info->key.u.ipv6.src = saddr; #endif } else { return -EINVAL; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index b1919278e9..2129ae42c7 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1907,20 +1907,20 @@ static int __init gtp_init(void) if (err < 0) goto error_out; - err = genl_register_family(>p_genl_family); + err = register_pernet_subsys(>p_net_ops); if (err < 0) goto unreg_rtnl_link; - err = register_pernet_subsys(>p_net_ops); + err = genl_register_family(>p_genl_family); if (err < 0) - goto unreg_genl_family; + goto unreg_pernet_subsys; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; -unreg_genl_family: - genl_unregister_family(>p_genl_family); +unreg_pernet_subsys: + unregister_pernet_subsys(>p_net_ops); unreg_rtnl_link: rtnl_link_unregister(>p_link_ops); error_out: diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index a94c7bd5db..25b1f929c4 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -94,8 +94,8 @@ config BAYCOM_SER_FDX driver, "BAYCOM ser12 half-duplex driver for AX.25" is the old driver and still provided in case this driver does not work with your serial interface chip. To configure the driver, use the sethdlc - utility available in the standard ax25 utilities package. For - information on the modems, see and + utility available in the standard ax25 utilities package. + For more information on the modems, see . To compile this driver as a module, choose M here: the module @@ -112,8 +112,7 @@ config BAYCOM_SER_HDX still provided in case your serial interface chip does not work with the full-duplex driver. This driver is deprecated. To configure the driver, use the sethdlc utility available in the standard ax25 - utilities package. For information on the modems, see - and + utilities package. For more information on the modems, see . To compile this driver as a module, choose M here: the module @@ -127,8 +126,8 @@ config BAYCOM_PAR This is a driver for Baycom style simple amateur radio modems that connect to a parallel interface. The driver supports the picpar and par96 designs. To configure the driver, use the sethdlc utility - available in the standard ax25 utilities package. For information on - the modems, see and the file + available in the standard ax25 utilities package. + For more information on the modems, see . To compile this driver as a module, choose M here: the module @@ -142,8 +141,8 @@ config BAYCOM_EPP This is a driver for Baycom style simple amateur radio modems that connect to a parallel interface. The driver supports the EPP designs. To configure the driver, use the sethdlc utility available - in the standard ax25 utilities package. For information on the - modems, see and the file + in the standard ax25 utilities package. + For more information on the modems, see . 
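The gtp.c hunk above reorders module initialisation so that the per-network-namespace state is registered before the generic netlink family that may use it, and the error path unwinds in reverse order. For readability, here is a sketch of the resulting gtp_init() sequence; the symbol names are the ones from gtp.c, but statements outside the hunk's context are omitted, so treat this as an illustration rather than the verbatim upstream function.

static int __init gtp_init(void)
{
	int err;

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	/* pernet state first: the genl family can be used as soon as it is
	 * registered, and its handlers rely on the pernet data being there.
	 */
	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_rtnl_link;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_pernet_subsys;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_pernet_subsys:
	unregister_pernet_subsys(&gtp_net_ops);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	return err;
}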
To compile this driver as a module, choose M here: the module diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 83ff882f5d..ccfc83857c 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -1074,7 +1074,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr, return 0; case HDLCDRVCTL_DRIVERNAME: - strncpy(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername)); + strscpy_pad(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername)); break; case HDLCDRVCTL_GETMODE: @@ -1091,7 +1091,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr, return baycom_setmode(bc, hi.data.modename); case HDLCDRVCTL_MODELIST: - strncpy(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x", + strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x", sizeof(hi.data.modename)); break; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 82e9796c8f..a6fcbda64e 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -708,7 +708,10 @@ void netvsc_device_remove(struct hv_device *device) /* Disable NAPI and disassociate its context from the device. */ for (i = 0; i < net_device->num_chn; i++) { /* See also vmbus_reset_channel_cb(). */ - napi_disable(&net_device->chan_table[i].napi); + /* only disable enabled NAPI channel */ + if (i < ndev->real_num_rx_queues) + napi_disable(&net_device->chan_table[i].napi); + netif_napi_del(&net_device->chan_table[i].napi); } @@ -851,7 +854,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: if (msglen < sizeof(struct nvsp_message_header) + @@ -860,7 +863,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: if (msglen < sizeof(struct nvsp_message_header) + @@ -869,7 +872,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG5_TYPE_SUBCHANNEL: if (msglen < sizeof(struct nvsp_message_header) + @@ -878,10 +881,6 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - /* Copy the response back */ - memcpy(&net_device->channel_init_pkt, nvsp_packet, - sizeof(struct nvsp_message)); - complete(&net_device->channel_init_wait); break; case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: @@ -904,13 +903,19 @@ static void netvsc_send_completion(struct net_device *ndev, netvsc_send_tx_complete(ndev, net_device, incoming_channel, desc, budget); - break; + return; default: netdev_err(ndev, "Unknown send completion type %d received!!\n", nvsp_packet->hdr.msg_type); + return; } + + /* Copy the response back */ + memcpy(&net_device->channel_init_pkt, nvsp_packet, + sizeof(struct nvsp_message)); + complete(&net_device->channel_init_wait); } static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cd15d7b380..9d2d66a4aa 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -42,6 +42,10 @@ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) +/* Macros to define the context of vf registration */ +#define VF_REG_IN_PROBE 1 +#define VF_REG_IN_NOTIFIER 2 + static unsigned int ring_size __ro_after_init = 128; module_param(ring_size, uint, 0444); MODULE_PARM_DESC(ring_size, "Ring 
buffer size (# of 4K pages)"); @@ -2183,7 +2187,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) } static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) + struct net_device *ndev, int context) { struct net_device_context *ndev_ctx = netdev_priv(ndev); int ret; @@ -2206,7 +2210,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + /* If this registration is called from probe context vf_takeover + * is taken care of later in probe itself. + */ + if (context == VF_REG_IN_NOTIFIER) + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2344,7 +2352,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev) return NOTIFY_DONE; } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_register_vf(struct net_device *vf_netdev, int context) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; @@ -2384,7 +2392,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - if (netvsc_vf_join(vf_netdev, ndev) != 0) + if (netvsc_vf_join(vf_netdev, ndev, context) != 0) return NOTIFY_DONE; dev_hold(vf_netdev); @@ -2482,10 +2490,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_OK; } +static int check_dev_is_matching_vf(struct net_device *event_ndev) +{ + /* Skip NetVSC interfaces */ + if (event_ndev->netdev_ops == &device_ops) + return -ENODEV; + + /* Avoid non-Ethernet type devices */ + if (event_ndev->type != ARPHRD_ETHER) + return -ENODEV; + + /* Avoid Vlan dev with same MAC registering as VF */ + if (is_vlan_dev(event_ndev)) + return -ENODEV; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (netif_is_bond_master(event_ndev)) + return -ENODEV; + + return 0; +} + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { - struct net_device *net = NULL; + struct net_device *net = NULL, *vf_netdev; struct net_device_context *net_device_ctx; struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; @@ -2597,6 +2626,30 @@ static int netvsc_probe(struct hv_device *dev, } list_add(&net_device_ctx->list, &netvsc_dev_list); + + /* When the hv_netvsc driver is unloaded and reloaded, the + * NET_DEVICE_REGISTER for the vf device is replayed before probe + * is complete. This is because register_netdevice_notifier() gets + * registered before vmbus_driver_register() so that callback func + * is set before probe and we don't miss events like NETDEV_POST_INIT + * So, in this section we try to register the matching vf device that + * is present as a netdevice, knowing that its register call is not + * processed in the netvsc_netdev_notifier(as probing is progress and + * get_netvsc_byslot fails). 
+ */ + for_each_netdev(dev_net(net), vf_netdev) { + ret = check_dev_is_matching_vf(vf_netdev); + if (ret != 0) + continue; + + if (net != get_netvsc_byslot(vf_netdev)) + continue; + + netvsc_prepare_bonding(vf_netdev); + netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); + __netvsc_vf_setup(net, vf_netdev); + break; + } rtnl_unlock(); netvsc_devinfo_put(device_info); @@ -2752,28 +2805,17 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if (netif_is_bond_master(event_dev)) + ret = check_dev_is_matching_vf(event_dev); + if (ret != 0) return NOTIFY_DONE; switch (event) { case NETDEV_POST_INIT: return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); + return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 78253ad57b..2c1b5def4a 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -454,5 +454,6 @@ static void __exit ifb_cleanup_module(void) module_init(ifb_init_module); module_exit(ifb_cleanup_module); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intermediate Functional Block (ifb) netdevice driver for sharing of resources and ingress packet queuing"); MODULE_AUTHOR("Jamal Hadi Salim"); MODULE_ALIAS_RTNL_LINK("ifb"); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index 4bc05948f7..a78c692f2d 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt) u32 unit_count; u32 unit; - unit_count = roundup(ipa->endpoint_count, 32); + unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32); for (unit = 0; unit < unit_count; unit++) { const struct reg *reg; u32 val; diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index 0eaa7a7f33..e223886123 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -67,7 +67,7 @@ struct ipa_power { spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); u32 interconnect_count; - struct icc_bulk_data interconnect[]; + struct icc_bulk_data interconnect[] __counted_by(interconnect_count); }; /* Initialize interconnects required for IPA operation */ diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index bddcc12781..29a5929d48 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -250,5 +250,6 @@ static void __exit macvtap_exit(void) module_exit(macvtap_exit); MODULE_ALIAS_RTNL_LINK("macvtap"); +MODULE_DESCRIPTION("MAC-VLAN based tap driver"); MODULE_AUTHOR("Arnd Bergmann "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index dc71657d91..ce9d2d2ccf 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -33,6 +33,15 @@ config MCTP_TRANSPORT_I2C from DMTF specification DSP0237. A MCTP protocol network device is created for each I2C bus that has been assigned a mctp-i2c device. 
+config MCTP_TRANSPORT_I3C + tristate "MCTP I3C transport" + depends on I3C + help + Provides a driver to access MCTP devices over I3C transport, + from DMTF specification DSP0233. + A MCTP protocol network device is created for each I3C bus + having a "mctp-controller" devicetree property. + endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index 1ca3e6028f..e1cb99ced5 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o +obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c new file mode 100644 index 0000000000..8e989c157c --- /dev/null +++ b/drivers/net/mctp/mctp-i3c.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implements DMTF specification + * "DSP0233 Management Component Transport Protocol (MCTP) I3C Transport + * Binding" + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0233_1.0.0.pdf + * + * Copyright (c) 2023 Code Construct + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MCTP_I3C_MAXBUF 65536 +/* 48 bit Provisioned Id */ +#define PID_SIZE 6 + +/* 64 byte payload, 4 byte MCTP header */ +static const int MCTP_I3C_MINMTU = 64 + 4; +/* One byte less to allow for the PEC */ +static const int MCTP_I3C_MAXMTU = MCTP_I3C_MAXBUF - 1; +/* 4 byte MCTP header, no data, 1 byte PEC */ +static const int MCTP_I3C_MINLEN = 4 + 1; + +/* Sufficient for 64kB at min mtu */ +static const int MCTP_I3C_TX_QUEUE_LEN = 1100; + +/* Somewhat arbitrary */ +static const int MCTP_I3C_IBI_SLOTS = 8; + +/* Mandatory Data Byte in an IBI, from DSP0233 */ +#define I3C_MDB_MCTP 0xAE +/* From MIPI Device Characteristics Register (DCR) Assignments */ +#define I3C_DCR_MCTP 0xCC + +static const char *MCTP_I3C_OF_PROP = "mctp-controller"; + +/* List of mctp_i3c_busdev */ +static LIST_HEAD(busdevs); +/* Protects busdevs, as well as mctp_i3c_bus.devs lists */ +static DEFINE_MUTEX(busdevs_lock); + +struct mctp_i3c_bus { + struct net_device *ndev; + + struct task_struct *tx_thread; + wait_queue_head_t tx_wq; + /* tx_lock protects tx_skb and devs */ + spinlock_t tx_lock; + /* Next skb to transmit */ + struct sk_buff *tx_skb; + /* Scratch buffer for xmit */ + u8 tx_scratch[MCTP_I3C_MAXBUF]; + + /* Element of busdevs */ + struct list_head list; + + /* Provisioned ID of our controller */ + u64 pid; + + struct i3c_bus *bus; + /* Head of mctp_i3c_device.list. Protected by busdevs_lock */ + struct list_head devs; +}; + +struct mctp_i3c_device { + struct i3c_device *i3c; + struct mctp_i3c_bus *mbus; + struct list_head list; /* Element of mctp_i3c_bus.devs */ + + /* Held while tx_thread is using this device */ + struct mutex lock; + + /* Whether BCR indicates MDB is present in IBI */ + bool have_mdb; + /* I3C dynamic address */ + u8 addr; + /* Maximum read length */ + u16 mrl; + /* Maximum write length */ + u16 mwl; + /* Provisioned ID */ + u64 pid; +}; + +/* We synthesise a mac header using the Provisioned ID. + * Used to pass dest to mctp_i3c_start_xmit. 
+ */ +struct mctp_i3c_internal_hdr { + u8 dest[PID_SIZE]; + u8 source[PID_SIZE]; +} __packed; + +static int mctp_i3c_read(struct mctp_i3c_device *mi) +{ + struct i3c_priv_xfer xfer = { .rnw = 1, .len = mi->mrl }; + struct net_device_stats *stats = &mi->mbus->ndev->stats; + struct mctp_i3c_internal_hdr *ihdr = NULL; + struct sk_buff *skb = NULL; + struct mctp_skb_cb *cb; + int net_status, rc; + u8 pec, addr; + + skb = netdev_alloc_skb(mi->mbus->ndev, + mi->mrl + sizeof(struct mctp_i3c_internal_hdr)); + if (!skb) { + stats->rx_dropped++; + rc = -ENOMEM; + goto err; + } + + skb->protocol = htons(ETH_P_MCTP); + /* Create a header for internal use */ + skb_reset_mac_header(skb); + ihdr = skb_put(skb, sizeof(struct mctp_i3c_internal_hdr)); + put_unaligned_be48(mi->pid, ihdr->source); + put_unaligned_be48(mi->mbus->pid, ihdr->dest); + skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr)); + + xfer.data.in = skb_put(skb, mi->mrl); + + rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); + if (rc < 0) + goto err; + + if (WARN_ON_ONCE(xfer.len > mi->mrl)) { + /* Bad i3c bus driver */ + rc = -EIO; + goto err; + } + if (xfer.len < MCTP_I3C_MINLEN) { + stats->rx_length_errors++; + rc = -EIO; + goto err; + } + + /* check PEC, including address byte */ + addr = mi->addr << 1 | 1; + pec = i2c_smbus_pec(0, &addr, 1); + pec = i2c_smbus_pec(pec, xfer.data.in, xfer.len - 1); + if (pec != ((u8 *)xfer.data.in)[xfer.len - 1]) { + stats->rx_crc_errors++; + rc = -EINVAL; + goto err; + } + + /* Remove PEC */ + skb_trim(skb, xfer.len - 1); + + cb = __mctp_cb(skb); + cb->halen = PID_SIZE; + put_unaligned_be48(mi->pid, cb->haddr); + + net_status = netif_rx(skb); + + if (net_status == NET_RX_SUCCESS) { + stats->rx_packets++; + stats->rx_bytes += xfer.len - 1; + } else { + stats->rx_dropped++; + } + + return 0; +err: + kfree_skb(skb); + return rc; +} + +static void mctp_i3c_ibi_handler(struct i3c_device *i3c, + const struct i3c_ibi_payload *payload) +{ + struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c); + + if (WARN_ON_ONCE(!mi)) + return; + + if (mi->have_mdb) { + if (payload->len > 0) { + if (((u8 *)payload->data)[0] != I3C_MDB_MCTP) { + /* Not a mctp-i3c interrupt, ignore it */ + return; + } + } else { + /* The BCR advertised a Mandatory Data Byte but the + * device didn't send one. + */ + dev_warn_once(i3cdev_to_dev(i3c), "IBI with missing MDB"); + } + } + + mctp_i3c_read(mi); +} + +static int mctp_i3c_setup(struct mctp_i3c_device *mi) +{ + const struct i3c_ibi_setup ibi = { + .max_payload_len = 1, + .num_slots = MCTP_I3C_IBI_SLOTS, + .handler = mctp_i3c_ibi_handler, + }; + struct i3c_device_info info; + int rc; + + i3c_device_get_info(mi->i3c, &info); + mi->have_mdb = info.bcr & BIT(2); + mi->addr = info.dyn_addr; + mi->mwl = info.max_write_len; + mi->mrl = info.max_read_len; + mi->pid = info.pid; + + rc = i3c_device_request_ibi(mi->i3c, &ibi); + if (rc == -ENOTSUPP) { + /* This driver only supports In-Band Interrupt mode. + * Support for Polling Mode could be added if required. + * (ENOTSUPP is from the i3c layer, not EOPNOTSUPP). + */ + dev_warn(i3cdev_to_dev(mi->i3c), + "Failed, bus driver doesn't support In-Band Interrupts"); + goto err; + } else if (rc < 0) { + dev_err(i3cdev_to_dev(mi->i3c), + "Failed requesting IBI (%d)\n", rc); + goto err; + } + + rc = i3c_device_enable_ibi(mi->i3c); + if (rc < 0) { + /* Assume a driver supporting request_ibi also + * supports enable_ibi. 
+ */ + dev_err(i3cdev_to_dev(mi->i3c), "Failed enabling IBI (%d)\n", rc); + goto err_free_ibi; + } + + return 0; + +err_free_ibi: + i3c_device_free_ibi(mi->i3c); + +err: + return rc; +} + +/* Adds a new MCTP i3c_device to a bus */ +static int mctp_i3c_add_device(struct mctp_i3c_bus *mbus, + struct i3c_device *i3c) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_device *mi = NULL; + int rc; + + mi = kzalloc(sizeof(*mi), GFP_KERNEL); + if (!mi) { + rc = -ENOMEM; + goto err; + } + mi->mbus = mbus; + mi->i3c = i3c; + mutex_init(&mi->lock); + list_add(&mi->list, &mbus->devs); + + i3cdev_set_drvdata(i3c, mi); + rc = mctp_i3c_setup(mi); + if (rc < 0) + goto err_free; + + return 0; + +err_free: + list_del(&mi->list); + kfree(mi); + +err: + dev_warn(i3cdev_to_dev(i3c), "Error adding mctp-i3c device, %d\n", rc); + return rc; +} + +static int mctp_i3c_probe(struct i3c_device *i3c) +{ + struct mctp_i3c_bus *b = NULL, *mbus = NULL; + + /* Look for a known bus */ + mutex_lock(&busdevs_lock); + list_for_each_entry(b, &busdevs, list) + if (b->bus == i3c->bus) { + mbus = b; + break; + } + mutex_unlock(&busdevs_lock); + + if (!mbus) { + /* probably no "mctp-controller" property on the i3c bus */ + return -ENODEV; + } + + return mctp_i3c_add_device(mbus, i3c); +} + +static void mctp_i3c_remove_device(struct mctp_i3c_device *mi) +__must_hold(&busdevs_lock) +{ + /* Ensure the tx thread isn't using the device */ + mutex_lock(&mi->lock); + + /* Counterpart of mctp_i3c_setup */ + i3c_device_disable_ibi(mi->i3c); + i3c_device_free_ibi(mi->i3c); + + /* Counterpart of mctp_i3c_add_device */ + i3cdev_set_drvdata(mi->i3c, NULL); + list_del(&mi->list); + + /* Safe to unlock after removing from the list */ + mutex_unlock(&mi->lock); + kfree(mi); +} + +static void mctp_i3c_remove(struct i3c_device *i3c) +{ + struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c); + + /* We my have received a Bus Remove notify prior to device remove, + * so mi will already be removed. + */ + if (!mi) + return; + + mutex_lock(&busdevs_lock); + mctp_i3c_remove_device(mi); + mutex_unlock(&busdevs_lock); +} + +/* Returns the device for an address, with mi->lock held */ +static struct mctp_i3c_device * +mctp_i3c_lookup(struct mctp_i3c_bus *mbus, u64 pid) +{ + struct mctp_i3c_device *mi = NULL, *ret = NULL; + + mutex_lock(&busdevs_lock); + list_for_each_entry(mi, &mbus->devs, list) + if (mi->pid == pid) { + ret = mi; + mutex_lock(&mi->lock); + break; + } + mutex_unlock(&busdevs_lock); + return ret; +} + +static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb) +{ + struct net_device_stats *stats = &mbus->ndev->stats; + struct i3c_priv_xfer xfer = { .rnw = false }; + struct mctp_i3c_internal_hdr *ihdr = NULL; + struct mctp_i3c_device *mi = NULL; + unsigned int data_len; + u8 *data = NULL; + u8 addr, pec; + int rc = 0; + u64 pid; + + skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr)); + data_len = skb->len; + + ihdr = (void *)skb_mac_header(skb); + + pid = get_unaligned_be48(ihdr->dest); + mi = mctp_i3c_lookup(mbus, pid); + if (!mi) { + /* I3C endpoint went away after the packet was enqueued? 
*/ + stats->tx_dropped++; + goto out; + } + + if (WARN_ON_ONCE(data_len + 1 > MCTP_I3C_MAXBUF)) + goto out; + + if (data_len + 1 > (unsigned int)mi->mwl) { + /* Route MTU was larger than supported by the endpoint */ + stats->tx_dropped++; + goto out; + } + + /* Need a linear buffer with space for the PEC */ + xfer.len = data_len + 1; + if (skb_tailroom(skb) >= 1) { + skb_put(skb, 1); + data = skb->data; + } else { + /* Otherwise need to copy the buffer */ + skb_copy_bits(skb, 0, mbus->tx_scratch, skb->len); + data = mbus->tx_scratch; + } + + /* PEC calculation */ + addr = mi->addr << 1; + pec = i2c_smbus_pec(0, &addr, 1); + pec = i2c_smbus_pec(pec, data, data_len); + data[data_len] = pec; + + xfer.data.out = data; + rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); + if (rc == 0) { + stats->tx_bytes += data_len; + stats->tx_packets++; + } else { + stats->tx_errors++; + } + +out: + if (mi) + mutex_unlock(&mi->lock); +} + +static int mctp_i3c_tx_thread(void *data) +{ + struct mctp_i3c_bus *mbus = data; + struct sk_buff *skb; + + for (;;) { + if (kthread_should_stop()) + break; + + spin_lock_bh(&mbus->tx_lock); + skb = mbus->tx_skb; + mbus->tx_skb = NULL; + spin_unlock_bh(&mbus->tx_lock); + + if (netif_queue_stopped(mbus->ndev)) + netif_wake_queue(mbus->ndev); + + if (skb) { + mctp_i3c_xmit(mbus, skb); + kfree_skb(skb); + } else { + wait_event_idle(mbus->tx_wq, + mbus->tx_skb || kthread_should_stop()); + } + } + + return 0; +} + +static netdev_tx_t mctp_i3c_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mctp_i3c_bus *mbus = netdev_priv(ndev); + netdev_tx_t ret; + + spin_lock(&mbus->tx_lock); + netif_stop_queue(ndev); + if (mbus->tx_skb) { + dev_warn_ratelimited(&ndev->dev, "TX with queue stopped"); + ret = NETDEV_TX_BUSY; + } else { + mbus->tx_skb = skb; + ret = NETDEV_TX_OK; + } + spin_unlock(&mbus->tx_lock); + + if (ret == NETDEV_TX_OK) + wake_up(&mbus->tx_wq); + + return ret; +} + +static void mctp_i3c_bus_free(struct mctp_i3c_bus *mbus) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_device *mi = NULL, *tmp = NULL; + + if (mbus->tx_thread) { + kthread_stop(mbus->tx_thread); + mbus->tx_thread = NULL; + } + + /* Remove any child devices */ + list_for_each_entry_safe(mi, tmp, &mbus->devs, list) { + mctp_i3c_remove_device(mi); + } + + kfree_skb(mbus->tx_skb); + list_del(&mbus->list); +} + +static void mctp_i3c_ndo_uninit(struct net_device *ndev) +{ + struct mctp_i3c_bus *mbus = netdev_priv(ndev); + + /* Perform cleanup here to ensure there are no remaining references */ + mctp_i3c_bus_free(mbus); +} + +static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) +{ + struct mctp_i3c_internal_hdr *ihdr; + + skb_push(skb, sizeof(struct mctp_i3c_internal_hdr)); + skb_reset_mac_header(skb); + ihdr = (void *)skb_mac_header(skb); + memcpy(ihdr->dest, daddr, PID_SIZE); + memcpy(ihdr->source, saddr, PID_SIZE); + return 0; +} + +static const struct net_device_ops mctp_i3c_ops = { + .ndo_start_xmit = mctp_i3c_start_xmit, + .ndo_uninit = mctp_i3c_ndo_uninit, +}; + +static const struct header_ops mctp_i3c_headops = { + .create = mctp_i3c_header_create, +}; + +static void mctp_i3c_net_setup(struct net_device *dev) +{ + dev->type = ARPHRD_MCTP; + + dev->mtu = MCTP_I3C_MAXMTU; + dev->min_mtu = MCTP_I3C_MINMTU; + dev->max_mtu = MCTP_I3C_MAXMTU; + dev->tx_queue_len = MCTP_I3C_TX_QUEUE_LEN; + + dev->hard_header_len = sizeof(struct mctp_i3c_internal_hdr); + dev->addr_len = PID_SIZE; + + 
dev->netdev_ops = &mctp_i3c_ops; + dev->header_ops = &mctp_i3c_headops; +} + +static bool mctp_i3c_is_mctp_controller(struct i3c_bus *bus) +{ + struct i3c_dev_desc *master = bus->cur_master; + + if (!master) + return false; + + return of_property_read_bool(master->common.master->dev.of_node, + MCTP_I3C_OF_PROP); +} + +/* Returns the Provisioned Id of a local bus master */ +static int mctp_i3c_bus_local_pid(struct i3c_bus *bus, u64 *ret_pid) +{ + struct i3c_dev_desc *master; + + master = bus->cur_master; + if (WARN_ON_ONCE(!master)) + return -ENOENT; + *ret_pid = master->info.pid; + + return 0; +} + +/* Returns an ERR_PTR on failure */ +static struct mctp_i3c_bus *mctp_i3c_bus_add(struct i3c_bus *bus) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_bus *mbus = NULL; + struct net_device *ndev = NULL; + char namebuf[IFNAMSIZ]; + u8 addr[PID_SIZE]; + int rc; + + if (!mctp_i3c_is_mctp_controller(bus)) + return ERR_PTR(-ENOENT); + + snprintf(namebuf, sizeof(namebuf), "mctpi3c%d", bus->id); + ndev = alloc_netdev(sizeof(*mbus), namebuf, NET_NAME_ENUM, + mctp_i3c_net_setup); + if (!ndev) { + rc = -ENOMEM; + goto err; + } + + mbus = netdev_priv(ndev); + mbus->ndev = ndev; + mbus->bus = bus; + INIT_LIST_HEAD(&mbus->devs); + list_add(&mbus->list, &busdevs); + + rc = mctp_i3c_bus_local_pid(bus, &mbus->pid); + if (rc < 0) { + dev_err(&ndev->dev, "No I3C PID available\n"); + goto err_free_uninit; + } + put_unaligned_be48(mbus->pid, addr); + dev_addr_set(ndev, addr); + + init_waitqueue_head(&mbus->tx_wq); + spin_lock_init(&mbus->tx_lock); + mbus->tx_thread = kthread_run(mctp_i3c_tx_thread, mbus, + "%s/tx", ndev->name); + if (IS_ERR(mbus->tx_thread)) { + dev_warn(&ndev->dev, "Error creating thread: %pe\n", + mbus->tx_thread); + rc = PTR_ERR(mbus->tx_thread); + mbus->tx_thread = NULL; + goto err_free_uninit; + } + + rc = mctp_register_netdev(ndev, NULL); + if (rc < 0) { + dev_warn(&ndev->dev, "netdev register failed: %d\n", rc); + goto err_free_netdev; + } + return mbus; + +err_free_uninit: + /* uninit will not get called if a netdev has not been registered, + * so we perform the same mbus cleanup manually. + */ + mctp_i3c_bus_free(mbus); + +err_free_netdev: + free_netdev(ndev); + +err: + return ERR_PTR(rc); +} + +static void mctp_i3c_bus_remove(struct mctp_i3c_bus *mbus) +__must_hold(&busdevs_lock) +{ + /* Unregister calls through to ndo_uninit -> mctp_i3c_bus_free() */ + mctp_unregister_netdev(mbus->ndev); + + free_netdev(mbus->ndev); + /* mbus is deallocated */ +} + +/* Removes all mctp-i3c busses */ +static void mctp_i3c_bus_remove_all(void) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp = NULL; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) { + mctp_i3c_bus_remove(mbus); + } + mutex_unlock(&busdevs_lock); +} + +/* Adds a i3c_bus if it isn't already in the busdevs list. + * Suitable as an i3c_for_each_bus_locked callback. + */ +static int mctp_i3c_bus_add_new(struct i3c_bus *bus, void *data) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp = NULL; + bool exists = false; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) + if (mbus->bus == bus) + exists = true; + + /* It is OK for a bus to already exist. 
That can occur due to + * the race in mod_init between notifier and for_each_bus + */ + if (!exists) + mctp_i3c_bus_add(bus); + mutex_unlock(&busdevs_lock); + return 0; +} + +static void mctp_i3c_notify_bus_remove(struct i3c_bus *bus) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) + if (mbus->bus == bus) + mctp_i3c_bus_remove(mbus); + mutex_unlock(&busdevs_lock); +} + +static int mctp_i3c_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case I3C_NOTIFY_BUS_ADD: + mctp_i3c_bus_add_new((struct i3c_bus *)data, NULL); + break; + case I3C_NOTIFY_BUS_REMOVE: + mctp_i3c_notify_bus_remove((struct i3c_bus *)data); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mctp_i3c_notifier = { + .notifier_call = mctp_i3c_notifier_call, +}; + +static const struct i3c_device_id mctp_i3c_ids[] = { + I3C_CLASS(I3C_DCR_MCTP, NULL), + { 0 }, +}; + +static struct i3c_driver mctp_i3c_driver = { + .driver = { + .name = "mctp-i3c", + }, + .probe = mctp_i3c_probe, + .remove = mctp_i3c_remove, + .id_table = mctp_i3c_ids, +}; + +static __init int mctp_i3c_mod_init(void) +{ + int rc; + + rc = i3c_register_notifier(&mctp_i3c_notifier); + if (rc < 0) { + i3c_driver_unregister(&mctp_i3c_driver); + return rc; + } + + i3c_for_each_bus_locked(mctp_i3c_bus_add_new, NULL); + + rc = i3c_driver_register(&mctp_i3c_driver); + if (rc < 0) + return rc; + + return 0; +} + +static __exit void mctp_i3c_mod_exit(void) +{ + int rc; + + i3c_driver_unregister(&mctp_i3c_driver); + + rc = i3c_unregister_notifier(&mctp_i3c_notifier); + if (rc < 0) + pr_warn("MCTP I3C could not unregister notifier, %d\n", rc); + + mctp_i3c_bus_remove_all(); +} + +module_init(mctp_i3c_mod_init); +module_exit(mctp_i3c_mod_exit); + +MODULE_DEVICE_TABLE(i3c, mctp_i3c_ids); +MODULE_DESCRIPTION("MCTP I3C device"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Matt Johnston "); diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c index 4630dde019..5d0f11f280 100644 --- a/drivers/net/mdio/acpi_mdio.c +++ b/drivers/net/mdio/acpi_mdio.c @@ -16,6 +16,7 @@ MODULE_AUTHOR("Calvin Johnson "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ACPI MDIO bus (Ethernet PHY) accessors"); /** * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL. 
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1183ef5e20..fd02f5cbc8 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -14,6 +14,7 @@ MODULE_AUTHOR("Calvin Johnson "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors"); static struct pse_control * fwnode_find_pse_control(struct fwnode_handle *fwnode) diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index c727103c8b..c217065041 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -177,15 +177,13 @@ static int aspeed_mdio_probe(struct platform_device *pdev) return 0; } -static int aspeed_mdio_remove(struct platform_device *pdev) +static void aspeed_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev); struct aspeed_mdio *ctx = bus->priv; reset_control_assert(ctx->reset); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id aspeed_mdio_of_match[] = { @@ -200,10 +198,11 @@ static struct platform_driver aspeed_mdio_driver = { .of_match_table = aspeed_mdio_of_match, }, .probe = aspeed_mdio_probe, - .remove = aspeed_mdio_remove, + .remove_new = aspeed_mdio_remove, }; module_platform_driver(aspeed_mdio_driver); MODULE_AUTHOR("Andrew Jeffery "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ASPEED MDIO bus controller"); diff --git a/drivers/net/mdio/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c index 77fc970cdf..5a2d26c6af 100644 --- a/drivers/net/mdio/mdio-bcm-iproc.c +++ b/drivers/net/mdio/mdio-bcm-iproc.c @@ -168,14 +168,12 @@ err_iproc_mdio: return rc; } -static int iproc_mdio_remove(struct platform_device *pdev) +static void iproc_mdio_remove(struct platform_device *pdev) { struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -210,7 +208,7 @@ static struct platform_driver iproc_mdio_driver = { #endif }, .probe = iproc_mdio_probe, - .remove = iproc_mdio_remove, + .remove_new = iproc_mdio_remove, }; module_platform_driver(iproc_mdio_driver); diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index 6b26a08036..e8cd8eef31 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -296,15 +296,13 @@ out_clk_disable: return ret; } -static int unimac_mdio_remove(struct platform_device *pdev) +static void unimac_mdio_remove(struct platform_device *pdev) { struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); clk_disable_unprepare(priv->clk); - - return 0; } static int __maybe_unused unimac_mdio_suspend(struct device *d) @@ -353,7 +351,7 @@ static struct platform_driver unimac_mdio_driver = { .pm = &unimac_mdio_pm_ops, }, .probe = unimac_mdio_probe, - .remove = unimac_mdio_remove, + .remove_new = unimac_mdio_remove, }; module_platform_driver(unimac_mdio_driver); diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index 81b7748c10..f88639297f 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -263,3 +263,4 @@ void free_mdio_bitbang(struct mii_bus *bus) EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Bitbanged MDIO buses"); diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 0fb3c2de08..897b88c50b 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ 
b/drivers/net/mdio/mdio-gpio.c @@ -194,11 +194,9 @@ static int mdio_gpio_probe(struct platform_device *pdev) return ret; } -static int mdio_gpio_remove(struct platform_device *pdev) +static void mdio_gpio_remove(struct platform_device *pdev) { mdio_gpio_bus_destroy(&pdev->dev); - - return 0; } static const struct of_device_id mdio_gpio_of_match[] = { @@ -210,7 +208,7 @@ MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, - .remove = mdio_gpio_remove, + .remove_new = mdio_gpio_remove, .driver = { .name = "mdio-gpio", .of_match_table = mdio_gpio_of_match, diff --git a/drivers/net/mdio/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c index f231c2fbb1..6703f626ee 100644 --- a/drivers/net/mdio/mdio-hisi-femac.c +++ b/drivers/net/mdio/mdio-hisi-femac.c @@ -118,7 +118,7 @@ err_out_free_mdiobus: return ret; } -static int hisi_femac_mdio_remove(struct platform_device *pdev) +static void hisi_femac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct hisi_femac_mdio_data *data = bus->priv; @@ -126,8 +126,6 @@ static int hisi_femac_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); clk_disable_unprepare(data->clk); mdiobus_free(bus); - - return 0; } static const struct of_device_id hisi_femac_mdio_dt_ids[] = { @@ -138,7 +136,7 @@ MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids); static struct platform_driver hisi_femac_mdio_driver = { .probe = hisi_femac_mdio_probe, - .remove = hisi_femac_mdio_remove, + .remove_new = hisi_femac_mdio_remove, .driver = { .name = "hisi-femac-mdio", .of_match_table = hisi_femac_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 78b93de636..abd8b508ec 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -278,13 +278,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) return 0; } -static int ipq4019_mdio_remove(struct platform_device *pdev) +static void ipq4019_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id ipq4019_mdio_dt_ids[] = { @@ -296,7 +294,7 @@ MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids); static struct platform_driver ipq4019_mdio_driver = { .probe = ipq4019_mdio_probe, - .remove = ipq4019_mdio_remove, + .remove_new = ipq4019_mdio_remove, .driver = { .name = "ipq4019-mdio", .of_match_table = ipq4019_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index fd97169601..f71b6e1c66 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -147,14 +147,11 @@ ipq8064_mdio_probe(struct platform_device *pdev) return 0; } -static int -ipq8064_mdio_remove(struct platform_device *pdev) +static void ipq8064_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id ipq8064_mdio_dt_ids[] = { @@ -165,7 +162,7 @@ MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids); static struct platform_driver ipq8064_mdio_driver = { .probe = ipq8064_mdio_probe, - .remove = ipq8064_mdio_remove, + .remove_new = ipq8064_mdio_remove, .driver = { .name = "ipq8064-mdio", .of_match_table = ipq8064_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c index f0cff584e1..d35af8cd7c 100644 --- a/drivers/net/mdio/mdio-moxart.c +++ 
b/drivers/net/mdio/mdio-moxart.c @@ -155,14 +155,12 @@ err_out_free_mdiobus: return ret; } -static int moxart_mdio_remove(struct platform_device *pdev) +static void moxart_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); mdiobus_free(bus); - - return 0; } static const struct of_device_id moxart_mdio_dt_ids[] = { @@ -173,7 +171,7 @@ MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids); static struct platform_driver moxart_mdio_driver = { .probe = moxart_mdio_probe, - .remove = moxart_mdio_remove, + .remove_new = moxart_mdio_remove, .driver = { .name = "moxart-mdio", .of_match_table = moxart_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 1a1b95ae95..c29377c853 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -335,15 +335,13 @@ out_disable_clk: return ret; } -static int mscc_miim_remove(struct platform_device *pdev) +static void mscc_miim_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct mscc_miim_dev *miim = bus->priv; clk_disable_unprepare(miim->clk); mdiobus_unregister(bus); - - return 0; } static const struct mscc_miim_info mscc_ocelot_miim_info = { @@ -371,7 +369,7 @@ MODULE_DEVICE_TABLE(of, mscc_miim_match); static struct platform_driver mscc_miim_driver = { .probe = mscc_miim_probe, - .remove = mscc_miim_remove, + .remove_new = mscc_miim_remove, .driver = { .name = "mscc-miim", .of_match_table = mscc_miim_match, diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 956d54846b..a750bd4c77 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -287,15 +287,13 @@ out_clk: return rc; } -static int mdio_mux_iproc_remove(struct platform_device *pdev) +static void mdio_mux_iproc_remove(struct platform_device *pdev) { struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); clk_disable_unprepare(md->core_clk); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -342,7 +340,7 @@ static struct platform_driver mdiomux_iproc_driver = { .pm = &mdio_mux_iproc_pm_ops, }, .probe = mdio_mux_iproc_probe, - .remove = mdio_mux_iproc_remove, + .remove_new = mdio_mux_iproc_remove, }; module_platform_driver(mdiomux_iproc_driver); diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c index 8b444a8eb6..1b77e0e3e6 100644 --- a/drivers/net/mdio/mdio-mux-bcm6368.c +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -153,14 +153,12 @@ out_register: return rc; } -static int bcm6368_mdiomux_remove(struct platform_device *pdev) +static void bcm6368_mdiomux_remove(struct platform_device *pdev) { struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); - - return 0; } static const struct of_device_id bcm6368_mdiomux_ids[] = { @@ -175,7 +173,7 @@ static struct platform_driver bcm6368_mdiomux_driver = { .of_match_table = bcm6368_mdiomux_ids, }, .probe = bcm6368_mdiomux_probe, - .remove = bcm6368_mdiomux_remove, + .remove_new = bcm6368_mdiomux_remove, }; module_platform_driver(bcm6368_mdiomux_driver); diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 3c7f16f06b..38fb031f89 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -62,11 +62,10 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev) return 0; } 
-static int mdio_mux_gpio_remove(struct platform_device *pdev) +static void mdio_mux_gpio_remove(struct platform_device *pdev) { struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - return 0; } static const struct of_device_id mdio_mux_gpio_match[] = { @@ -87,7 +86,7 @@ static struct platform_driver mdio_mux_gpio_driver = { .of_match_table = mdio_mux_gpio_match, }, .probe = mdio_mux_gpio_probe, - .remove = mdio_mux_gpio_remove, + .remove_new = mdio_mux_gpio_remove, }; module_platform_driver(mdio_mux_gpio_driver); diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index 910e5cf74e..754b0f2cf1 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -336,7 +336,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev) return ret; } -static int g12a_mdio_mux_remove(struct platform_device *pdev) +static void g12a_mdio_mux_remove(struct platform_device *pdev) { struct g12a_mdio_mux *priv = platform_get_drvdata(pdev); @@ -344,13 +344,11 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev) if (__clk_is_enabled(priv->pll)) clk_disable_unprepare(priv->pll); - - return 0; } static struct platform_driver g12a_mdio_mux_driver = { .probe = g12a_mdio_mux_probe, - .remove = g12a_mdio_mux_remove, + .remove_new = g12a_mdio_mux_remove, .driver = { .name = "g12a-mdio_mux", .of_match_table = g12a_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c index 76188575ca..89554021b5 100644 --- a/drivers/net/mdio/mdio-mux-meson-gxl.c +++ b/drivers/net/mdio/mdio-mux-meson-gxl.c @@ -140,18 +140,16 @@ static int gxl_mdio_mux_probe(struct platform_device *pdev) return ret; } -static int gxl_mdio_mux_remove(struct platform_device *pdev) +static void gxl_mdio_mux_remove(struct platform_device *pdev) { struct gxl_mdio_mux *priv = platform_get_drvdata(pdev); mdio_mux_uninit(priv->mux_handle); - - return 0; } static struct platform_driver gxl_mdio_mux_driver = { .probe = gxl_mdio_mux_probe, - .remove = gxl_mdio_mux_remove, + .remove_new = gxl_mdio_mux_remove, .driver = { .name = "gxl-mdio-mux", .of_match_table = gxl_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index 09af150ed7..de08419d0c 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -169,13 +169,11 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) return 0; } -static int mdio_mux_mmioreg_remove(struct platform_device *pdev) +static void mdio_mux_mmioreg_remove(struct platform_device *pdev) { struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - - return 0; } static const struct of_device_id mdio_mux_mmioreg_match[] = { @@ -192,7 +190,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = { .of_match_table = mdio_mux_mmioreg_match, }, .probe = mdio_mux_mmioreg_probe, - .remove = mdio_mux_mmioreg_remove, + .remove_new = mdio_mux_mmioreg_remove, }; module_platform_driver(mdio_mux_mmioreg_driver); diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index bfa5af577b..569b133831 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -85,7 +85,7 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev) return ret; } -static int mdio_mux_multiplexer_remove(struct platform_device *pdev) +static void 
mdio_mux_multiplexer_remove(struct platform_device *pdev) { struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev); @@ -93,8 +93,6 @@ static int mdio_mux_multiplexer_remove(struct platform_device *pdev) if (s->do_deselect) mux_control_deselect(s->muxc); - - return 0; } static const struct of_device_id mdio_mux_multiplexer_match[] = { @@ -109,7 +107,7 @@ static struct platform_driver mdio_mux_multiplexer_driver = { .of_match_table = mdio_mux_multiplexer_match, }, .probe = mdio_mux_multiplexer_probe, - .remove = mdio_mux_multiplexer_remove, + .remove_new = mdio_mux_multiplexer_remove, }; module_platform_driver(mdio_mux_multiplexer_driver); diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 7c65c547d3..037a38cfed 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -78,7 +78,7 @@ fail_register: return err; } -static int octeon_mdiobus_remove(struct platform_device *pdev) +static void octeon_mdiobus_remove(struct platform_device *pdev) { struct cavium_mdiobus *bus; union cvmx_smix_en smi_en; @@ -88,7 +88,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) mdiobus_unregister(bus->mii_bus); smi_en.u64 = 0; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); - return 0; } static const struct of_device_id octeon_mdiobus_match[] = { @@ -105,7 +104,7 @@ static struct platform_driver octeon_mdiobus_driver = { .of_match_table = octeon_mdiobus_match, }, .probe = octeon_mdiobus_probe, - .remove = octeon_mdiobus_remove, + .remove_new = octeon_mdiobus_remove, }; module_platform_driver(octeon_mdiobus_driver); diff --git a/drivers/net/mdio/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c index f798de3276..4511bcc73b 100644 --- a/drivers/net/mdio/mdio-sun4i.c +++ b/drivers/net/mdio/mdio-sun4i.c @@ -142,7 +142,7 @@ err_out_free_mdiobus: return ret; } -static int sun4i_mdio_remove(struct platform_device *pdev) +static void sun4i_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct sun4i_mdio_data *data = bus->priv; @@ -151,8 +151,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev) if (data->regulator) regulator_disable(data->regulator); mdiobus_free(bus); - - return 0; } static const struct of_device_id sun4i_mdio_dt_ids[] = { @@ -166,7 +164,7 @@ MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids); static struct platform_driver sun4i_mdio_driver = { .probe = sun4i_mdio_probe, - .remove = sun4i_mdio_remove, + .remove_new = sun4i_mdio_remove, .driver = { .name = "sun4i-mdio", .of_match_table = sun4i_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 1190a79355..2772a30985 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,11 +13,13 @@ #include #include #include +#include #include #include -#include #include +#include #include +#include #include u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr) @@ -326,24 +328,11 @@ static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; - const struct of_device_id *of_id; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; - of_id = of_match_device(xgene_mdio_of_match, &pdev->dev); - if (of_id) { - mdio_id = (uintptr_t)of_id->data; - } else { -#ifdef CONFIG_ACPI - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(xgene_mdio_acpi_match, &pdev->dev); - if (acpi_id) - mdio_id = (enum xgene_mdio_id)acpi_id->driver_data; -#endif - } - + 
mdio_id = (uintptr_t)device_get_match_data(&pdev->dev); if (!mdio_id) return -ENODEV; @@ -432,7 +421,7 @@ out_clk: return ret; } -static int xgene_mdio_remove(struct platform_device *pdev) +static void xgene_mdio_remove(struct platform_device *pdev) { struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev); struct mii_bus *mdio_bus = pdata->mdio_bus; @@ -443,18 +432,16 @@ static int xgene_mdio_remove(struct platform_device *pdev) if (dev->of_node) clk_disable_unprepare(pdata->clk); - - return 0; } static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", - .of_match_table = of_match_ptr(xgene_mdio_of_match), + .of_match_table = xgene_mdio_of_match, .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), }, .probe = xgene_mdio_probe, - .remove = xgene_mdio_remove, + .remove_new = xgene_mdio_remove, }; module_platform_driver(xgene_mdio_driver); diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 7eb32ebb84..64ebcb6d23 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -25,6 +25,7 @@ MODULE_AUTHOR("Grant Likely "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors"); /* Extract the clause 22 phy ID from the compatible string of the form * ethernet-phy-idAAAA.BBBB */ diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 3111e16485..6e14ba5e06 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -53,6 +53,8 @@ static bool oops_only = false; module_param(oops_only, bool, 0600); MODULE_PARM_DESC(oops_only, "Only log oops messages"); +#define NETCONSOLE_PARAM_TARGET_PREFIX "cmdline" + #ifndef MODULE static int __init option_setup(char *opt) { @@ -165,6 +167,10 @@ static void netconsole_target_put(struct netconsole_target *nt) { } +static void populate_configfs_item(struct netconsole_target *nt, + int cmdline_count) +{ +} #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Allocate and initialize with defaults. @@ -192,58 +198,6 @@ static struct netconsole_target *alloc_and_init(void) return nt; } -/* Allocate new target (from boot/module param) and setup netpoll for it */ -static struct netconsole_target *alloc_param_target(char *target_config) -{ - struct netconsole_target *nt; - int err; - - nt = alloc_and_init(); - if (!nt) { - err = -ENOMEM; - goto fail; - } - - if (*target_config == '+') { - nt->extended = true; - target_config++; - } - - if (*target_config == 'r') { - if (!nt->extended) { - pr_err("Netconsole configuration error. 
Release feature requires extended log message"); - err = -EINVAL; - goto fail; - } - nt->release = true; - target_config++; - } - - /* Parse parameters and setup netpoll */ - err = netpoll_parse_options(&nt->np, target_config); - if (err) - goto fail; - - err = netpoll_setup(&nt->np); - if (err) - goto fail; - - nt->enabled = true; - - return nt; - -fail: - kfree(nt); - return ERR_PTR(err); -} - -/* Cleanup netpoll for given target (from boot/module param) and free it */ -static void free_param_target(struct netconsole_target *nt) -{ - netpoll_cleanup(&nt->np); - kfree(nt); -} - #ifdef CONFIG_NETCONSOLE_DYNAMIC /* @@ -675,6 +629,23 @@ static const struct config_item_type netconsole_target_type = { .ct_owner = THIS_MODULE, }; +static struct netconsole_target *find_cmdline_target(const char *name) +{ + struct netconsole_target *nt, *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + if (!strcmp(nt->item.ci_name, name)) { + ret = nt; + break; + } + } + spin_unlock_irqrestore(&target_list_lock, flags); + + return ret; +} + /* * Group operations and type for netconsole_subsys. */ @@ -685,6 +656,17 @@ static struct config_item *make_netconsole_target(struct config_group *group, struct netconsole_target *nt; unsigned long flags; + /* Checking if a target by this name was created at boot time. If so, + * attach a configfs entry to that target. This enables dynamic + * control. + */ + if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX, + strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) { + nt = find_cmdline_target(name); + if (nt) + return &nt->item; + } + nt = alloc_and_init(); if (!nt) return ERR_PTR(-ENOMEM); @@ -740,6 +722,17 @@ static struct configfs_subsystem netconsole_subsys = { }, }; +static void populate_configfs_item(struct netconsole_target *nt, + int cmdline_count) +{ + char target_name[16]; + + snprintf(target_name, sizeof(target_name), "%s%d", + NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count); + config_item_init_type_name(&nt->item, target_name, + &netconsole_target_type); +} + #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Handle network interface device notifications */ @@ -938,6 +931,60 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) spin_unlock_irqrestore(&target_list_lock, flags); } +/* Allocate new target (from boot/module param) and setup netpoll for it */ +static struct netconsole_target *alloc_param_target(char *target_config, + int cmdline_count) +{ + struct netconsole_target *nt; + int err; + + nt = alloc_and_init(); + if (!nt) { + err = -ENOMEM; + goto fail; + } + + if (*target_config == '+') { + nt->extended = true; + target_config++; + } + + if (*target_config == 'r') { + if (!nt->extended) { + pr_err("Netconsole configuration error. 
Release feature requires extended log message"); + err = -EINVAL; + goto fail; + } + nt->release = true; + target_config++; + } + + /* Parse parameters and setup netpoll */ + err = netpoll_parse_options(&nt->np, target_config); + if (err) + goto fail; + + err = netpoll_setup(&nt->np); + if (err) + goto fail; + + populate_configfs_item(nt, cmdline_count); + nt->enabled = true; + + return nt; + +fail: + kfree(nt); + return ERR_PTR(err); +} + +/* Cleanup netpoll for given target (from boot/module param) and free it */ +static void free_param_target(struct netconsole_target *nt) +{ + netpoll_cleanup(&nt->np); + kfree(nt); +} + static struct console netconsole_ext = { .name = "netcon_ext", .flags = CON_ENABLED | CON_EXTENDED, @@ -954,6 +1001,7 @@ static int __init init_netconsole(void) { int err; struct netconsole_target *nt, *tmp; + unsigned int count = 0; bool extended = false; unsigned long flags; char *target_config; @@ -961,7 +1009,7 @@ static int __init init_netconsole(void) if (strnlen(input, MAX_PARAM_LENGTH)) { while ((target_config = strsep(&input, ";"))) { - nt = alloc_param_target(target_config); + nt = alloc_param_target(target_config, count); if (IS_ERR(nt)) { err = PTR_ERR(nt); goto fail; @@ -977,6 +1025,7 @@ static int __init init_netconsole(void) spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); + count++; } } diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index 0787ad252d..bcbc1e19ed 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -3,11 +3,13 @@ * Copyright (C) 2019 Mellanox Technologies. All rights reserved */ +#include #include #include #include #include #include +#include #include #include @@ -17,6 +19,8 @@ static DEFINE_IDA(nsim_bus_dev_ids); static LIST_HEAD(nsim_bus_dev_list); static DEFINE_MUTEX(nsim_bus_dev_list_lock); static bool nsim_bus_enable; +static refcount_t nsim_bus_devs; /* Including the bus itself. 
*/ +static DECLARE_COMPLETION(nsim_bus_devs_released); static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev) { @@ -121,6 +125,8 @@ static void nsim_bus_dev_release(struct device *dev) nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev); kfree(nsim_bus_dev); + if (refcount_dec_and_test(&nsim_bus_devs)) + complete(&nsim_bus_devs_released); } static struct device_type nsim_bus_dev_type = { @@ -170,6 +176,7 @@ new_device_store(const struct bus_type *bus, const char *buf, size_t count) goto err; } + refcount_inc(&nsim_bus_devs); /* Allow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, true); @@ -326,6 +333,7 @@ int nsim_bus_init(void) err = driver_register(&nsim_driver); if (err) goto err_bus_unregister; + refcount_set(&nsim_bus_devs, 1); /* Allow using resources */ smp_store_release(&nsim_bus_enable, true); return 0; @@ -341,6 +349,8 @@ void nsim_bus_exit(void) /* Disallow using resources */ smp_store_release(&nsim_bus_enable, false); + if (refcount_dec_and_test(&nsim_bus_devs)) + complete(&nsim_bus_devs_released); mutex_lock(&nsim_bus_dev_list_lock); list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { @@ -349,6 +359,8 @@ void nsim_bus_exit(void) } mutex_unlock(&nsim_bus_dev_list_lock); + wait_for_completion(&nsim_bus_devs_released); + driver_unregister(&nsim_driver); bus_unregister(&nsim_bus); } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index b4d3b9cde8..92a7a36b93 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work) trap_report_dw.work); nsim_dev = nsim_trap_data->nsim_dev; - /* For each running port and enabled packet trap, generate a UDP - * packet with a random 5-tuple and report it. - */ if (!devl_trylock(priv_to_devlink(nsim_dev))) { - schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0); + schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1); return; } + /* For each running port and enabled packet trap, generate a UDP + * packet with a random 5-tuple and report it. 
+ */ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) { if (!netif_running(nsim_dev_port->ns->netdev)) continue; diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index eb04ed715d..70e8bdf34b 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -63,91 +63,45 @@ nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter, static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) { char *binary; - int err; int i; - err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4); - if (err) - return err; - err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring"); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "test_bool", true); + devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1); + devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3); + devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4); + devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring"); binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN); if (!binary) return -ENOMEM; get_random_bytes(binary, binary_len); - err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len); + devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len); kfree(binary); - if (err) - return err; - err = devlink_fmsg_pair_nest_start(fmsg, "test_nest"); - if (err) - return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false); - if (err) - return err; - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "test_nest"); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false); + devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array"); - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + for (i = 0; i < 10; i++) + devlink_fmsg_u32_put(fmsg, i); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array"); - if (err) - return err; for (i = 0; i < 10; i++) { - err = devlink_fmsg_u32_put(fmsg, i); - if (err) - return err; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_bool_pair_put(fmsg, "in_array_nested_test_bool", + false); + devlink_fmsg_u8_pair_put(fmsg, "in_array_nested_test_u8", i); + devlink_fmsg_obj_nest_end(fmsg); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + devlink_fmsg_arr_pair_nest_end(fmsg); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_bool_pair_put(fmsg, - "in_array_nested_test_bool", - false); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, - "in_array_nested_test_u8", - i); - if (err) - return err; - err = 
devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - } - return devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int @@ -157,14 +111,10 @@ nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter, { struct nsim_dev_health *health = devlink_health_reporter_priv(reporter); struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx; - int err; - if (ctx) { - err = devlink_fmsg_string_pair_put(fmsg, "break_message", - ctx->break_msg); - if (err) - return err; - } + if (ctx) + devlink_fmsg_string_pair_put(fmsg, "break_message", ctx->break_msg); + return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len); } @@ -174,15 +124,11 @@ nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct nsim_dev_health *health = devlink_health_reporter_priv(reporter); - int err; - if (health->recovered_break_msg) { - err = devlink_fmsg_string_pair_put(fmsg, - "recovered_break_message", - health->recovered_break_msg); - if (err) - return err; - } + if (health->recovered_break_msg) + devlink_fmsg_string_pair_put(fmsg, "recovered_break_message", + health->recovered_break_msg); + return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len); } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index d8ca82addf..77e8250282 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -475,4 +475,5 @@ static void __exit nsim_module_exit(void) module_init(nsim_module_init); module_exit(nsim_module_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simulated networking device for testing"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c new file mode 100644 index 0000000000..39171380cc --- /dev/null +++ b/drivers/net/netkit.c @@ -0,0 +1,960 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Isovalent */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "netkit" + +struct netkit { + /* Needed in fast-path */ + struct net_device __rcu *peer; + struct bpf_mprog_entry __rcu *active; + enum netkit_action policy; + struct bpf_mprog_bundle bundle; + + /* Needed in slow-path */ + enum netkit_mode mode; + bool primary; + u32 headroom; +}; + +struct netkit_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +static __always_inline int +netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, + enum netkit_action ret) +{ + const struct bpf_mprog_fp *fp; + const struct bpf_prog *prog; + + bpf_mprog_foreach_prog(entry, fp, prog) { + bpf_compute_data_pointers(skb); + ret = bpf_prog_run(prog, skb); + if (ret != NETKIT_NEXT) + break; + } + return ret; +} + +static void netkit_prep_forward(struct sk_buff *skb, bool xnet) +{ + skb_scrub_packet(skb, xnet); + skb->priority = 0; + nf_skip_egress(skb, true); +} + +static struct netkit *netkit_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + enum netkit_action ret = READ_ONCE(nk->policy); + netdev_tx_t ret_dev = NET_XMIT_SUCCESS; + const struct bpf_mprog_entry *entry; + struct net_device *peer; + int len = skb->len; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer || !(peer->flags & IFF_UP) || + !pskb_may_pull(skb, ETH_HLEN) || + skb_orphan_frags(skb, GFP_ATOMIC))) + goto drop; + netkit_prep_forward(skb, !net_eq(dev_net(dev), 
dev_net(peer))); + skb->dev = peer; + entry = rcu_dereference(nk->active); + if (entry) + ret = netkit_run(entry, skb, ret); + switch (ret) { + case NETKIT_NEXT: + case NETKIT_PASS: + skb->protocol = eth_type_trans(skb, skb->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { + dev_sw_netstats_tx_add(dev, 1, len); + dev_sw_netstats_rx_add(peer, len); + } else { + goto drop_stats; + } + break; + case NETKIT_REDIRECT: + dev_sw_netstats_tx_add(dev, 1, len); + skb_do_redirect(skb); + break; + case NETKIT_DROP: + default: +drop: + kfree_skb(skb); +drop_stats: + dev_core_stats_tx_dropped_inc(dev); + ret_dev = NET_XMIT_DROP; + break; + } + rcu_read_unlock(); + return ret_dev; +} + +static int netkit_open(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (!peer) + return -ENOTCONN; + if (peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(peer); + } + return 0; +} + +static int netkit_close(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + netif_carrier_off(dev); + if (peer) + netif_carrier_off(peer); + return 0; +} + +static int netkit_get_iflink(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer; + int iflink = 0; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (peer) + iflink = peer->ifindex; + rcu_read_unlock(); + return iflink; +} + +static void netkit_set_multicast(struct net_device *dev) +{ + /* Nothing to do, we receive whatever gets pushed to us! */ +} + +static void netkit_set_headroom(struct net_device *dev, int headroom) +{ + struct netkit *nk = netkit_priv(dev), *nk2; + struct net_device *peer; + + if (headroom < 0) + headroom = NET_SKB_PAD; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer)) + goto out; + + nk2 = netkit_priv(peer); + nk->headroom = headroom; + headroom = max(nk->headroom, nk2->headroom); + + peer->needed_headroom = headroom; + dev->needed_headroom = headroom; +out: + rcu_read_unlock(); +} + +INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return rcu_dereference(netkit_priv(dev)->peer); +} + +static void netkit_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + dev_fetch_sw_netstats(stats, dev->tstats); + stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped); +} + +static void netkit_uninit(struct net_device *dev); + +static const struct net_device_ops netkit_netdev_ops = { + .ndo_open = netkit_open, + .ndo_stop = netkit_close, + .ndo_start_xmit = netkit_xmit, + .ndo_set_rx_mode = netkit_set_multicast, + .ndo_set_rx_headroom = netkit_set_headroom, + .ndo_get_iflink = netkit_get_iflink, + .ndo_get_peer_dev = netkit_peer_dev, + .ndo_get_stats64 = netkit_get_stats, + .ndo_uninit = netkit_uninit, + .ndo_features_check = passthru_features_check, +}; + +static void netkit_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); +} + +static const struct ethtool_ops netkit_ethtool_ops = { + .get_drvinfo = netkit_get_drvinfo, +}; + +static void netkit_setup(struct net_device *dev) +{ + static const netdev_features_t netkit_features_hw_vlan = + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; + static const netdev_features_t netkit_features = + netkit_features_hw_vlan | + 
NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HIGHDMA | + NETIF_F_GSO_SOFTWARE | + NETIF_F_GSO_ENCAP_ALL; + + ether_setup(dev); + dev->max_mtu = ETH_MAX_MTU; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + + dev->flags |= IFF_NOARP; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_PHONY_HEADROOM; + dev->priv_flags |= IFF_NO_QUEUE; + + dev->ethtool_ops = &netkit_ethtool_ops; + dev->netdev_ops = &netkit_netdev_ops; + + dev->features |= netkit_features | NETIF_F_LLTX; + dev->hw_features = netkit_features; + dev->hw_enc_features = netkit_features; + dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; + dev->vlan_features = dev->features & ~netkit_features_hw_vlan; + + dev->needs_free_netdev = true; + + netif_set_tso_max_size(dev, GSO_MAX_SIZE); +} + +static struct net *netkit_get_link_net(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + return peer ? dev_net(peer) : dev_net(dev); +} + +static int netkit_check_policy(int policy, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (policy) { + case NETKIT_PASS: + case NETKIT_DROP: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided default xmit policy not supported"); + return -EINVAL; + } +} + +static int netkit_check_mode(int mode, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (mode) { + case NETKIT_L2: + case NETKIT_L3: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided device mode can only be L2 or L3"); + return -EINVAL; + } +} + +static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *attr = tb[IFLA_ADDRESS]; + + if (!attr) + return 0; + NL_SET_ERR_MSG_ATTR(extack, attr, + "Setting Ethernet address is not supported"); + return -EOPNOTSUPP; +} + +static struct rtnl_link_ops netkit_link_ops; + +static int netkit_new_link(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr; + enum netkit_action default_prim = NETKIT_PASS; + enum netkit_action default_peer = NETKIT_PASS; + enum netkit_mode mode = NETKIT_L3; + unsigned char ifname_assign_type; + struct ifinfomsg *ifmp = NULL; + struct net_device *peer; + char ifname[IFNAMSIZ]; + struct netkit *nk; + struct net *net; + int err; + + if (data) { + if (data[IFLA_NETKIT_MODE]) { + attr = data[IFLA_NETKIT_MODE]; + mode = nla_get_u32(attr); + err = netkit_check_mode(mode, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_INFO]) { + attr = data[IFLA_NETKIT_PEER_INFO]; + ifmp = nla_data(attr); + err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack); + if (err < 0) + return err; + err = netkit_validate(peer_tb, NULL, extack); + if (err < 0) + return err; + tbp = peer_tb; + } + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + default_prim = nla_get_u32(attr); + err = netkit_check_policy(default_prim, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_POLICY]) { + attr = data[IFLA_NETKIT_PEER_POLICY]; + default_peer = nla_get_u32(attr); + err = netkit_check_policy(default_peer, attr, extack); + if (err < 0) + return err; + } + } + + if (ifmp && tbp[IFLA_IFNAME]) { + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + ifname_assign_type = NET_NAME_USER; + } else 
{ + strscpy(ifname, "nk%d", IFNAMSIZ); + ifname_assign_type = NET_NAME_ENUM; + } + + net = rtnl_link_get_net(src_net, tbp); + if (IS_ERR(net)) + return PTR_ERR(net); + + peer = rtnl_create_link(net, ifname, ifname_assign_type, + &netkit_link_ops, tbp, extack); + if (IS_ERR(peer)) { + put_net(net); + return PTR_ERR(peer); + } + + netif_inherit_tso_max(peer, dev); + + if (mode == NETKIT_L2) + eth_hw_addr_random(peer); + if (ifmp && dev->ifindex) + peer->ifindex = ifmp->ifi_index; + + nk = netkit_priv(peer); + nk->primary = false; + nk->policy = default_peer; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + + err = register_netdevice(peer); + put_net(net); + if (err < 0) + goto err_register_peer; + netif_carrier_off(peer); + if (mode == NETKIT_L2) + dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL); + + err = rtnl_configure_link(peer, NULL, 0, NULL); + if (err < 0) + goto err_configure_peer; + + if (mode == NETKIT_L2) + eth_hw_addr_random(dev); + if (tb[IFLA_IFNAME]) + nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + strscpy(dev->name, "nk%d", IFNAMSIZ); + + nk = netkit_priv(dev); + nk->primary = true; + nk->policy = default_prim; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + + err = register_netdevice(dev); + if (err < 0) + goto err_configure_peer; + netif_carrier_off(dev); + if (mode == NETKIT_L2) + dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL); + + rcu_assign_pointer(netkit_priv(dev)->peer, peer); + rcu_assign_pointer(netkit_priv(peer)->peer, dev); + return 0; +err_configure_peer: + unregister_netdevice(peer); + return err; +err_register_peer: + free_netdev(peer); + return err; +} + +static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev, + bool bundle_fallback) +{ + struct netkit *nk = netkit_priv(dev); + struct bpf_mprog_entry *entry; + + ASSERT_RTNL(); + entry = rcu_dereference_rtnl(nk->active); + if (entry) + return entry; + if (bundle_fallback) + return &nk->bundle.a; + return NULL; +} + +static void netkit_entry_update(struct net_device *dev, + struct bpf_mprog_entry *entry) +{ + struct netkit *nk = netkit_priv(dev); + + ASSERT_RTNL(); + rcu_assign_pointer(nk->active, entry); +} + +static void netkit_entry_sync(void) +{ + synchronize_rcu(); +} + +static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which) +{ + struct net_device *dev; + struct netkit *nk; + + ASSERT_RTNL(); + + switch (which) { + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: + break; + default: + return ERR_PTR(-EINVAL); + } + + dev = __dev_get_by_index(net, ifindex); + if (!dev) + return ERR_PTR(-ENODEV); + if (dev->netdev_ops != &netkit_netdev_ops) + return ERR_PTR(-ENXIO); + + nk = netkit_priv(dev); + if (!nk->primary) + return ERR_PTR(-EACCES); + if (which == BPF_NETKIT_PEER) { + dev = rcu_dereference_rtnl(nk->peer); + if (!dev) + return ERR_PTR(-ENODEV); + } + return dev; +} + +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct bpf_prog *replace_prog = NULL; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, true); + if (attr->attach_flags & BPF_F_REPLACE) { + replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, + prog->type); + if (IS_ERR(replace_prog)) { + ret = PTR_ERR(replace_prog); + replace_prog = NULL; + goto out; + } + } + ret = 
bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog, + attr->attach_flags, attr->relative_fd, + attr->expected_revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } +out: + if (replace_prog) + bpf_prog_put(replace_prog); + rtnl_unlock(); + return ret; +} + +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags, + attr->relative_fd, attr->expected_revision); + if (!ret) { + if (!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + return ret; +} + +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) +{ + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->query.target_ifindex, + attr->query.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false)); +out: + rtnl_unlock(); + return ret; +} + +static struct netkit_link *netkit_link(const struct bpf_link *link) +{ + return container_of(link, struct netkit_link, link); +} + +static int netkit_link_prog_attach(struct bpf_link *link, u32 flags, + u32 id_or_fd, u64 revision) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev = nkl->dev; + int ret; + + ASSERT_RTNL(); + entry = netkit_entry_fetch(dev, true); + ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags, + id_or_fd, revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } + return ret; +} + +static void netkit_link_release(struct bpf_link *link) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) + goto out; + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0); + if (!ret) { + if (!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + nkl->dev = NULL; + } +out: + WARN_ON_ONCE(ret); + rtnl_unlock(); +} + +static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog, + struct bpf_prog *oprog) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) { + ret = -ENOLINK; + goto out; + } + if (oprog && link->prog != oprog) { + ret = -EPERM; + goto out; + } + oprog = link->prog; + if (oprog == nprog) { + bpf_prog_put(nprog); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog, + BPF_F_REPLACE | BPF_F_ID, + link->prog->aux->id, 0); + 
if (!ret) { + WARN_ON_ONCE(entry != entry_new); + oprog = xchg(&link->prog, nprog); + bpf_prog_put(oprog); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + return ret; +} + +static void netkit_link_dealloc(struct bpf_link *link) +{ + kfree(netkit_link(link)); +} + +static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + seq_printf(seq, "ifindex:\t%u\n", ifindex); + seq_printf(seq, "attach_type:\t%u (%s)\n", + nkl->location, + nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer"); +} + +static int netkit_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + info->netkit.ifindex = ifindex; + info->netkit.attach_type = nkl->location; + return 0; +} + +static int netkit_link_detach(struct bpf_link *link) +{ + netkit_link_release(link); + return 0; +} + +static const struct bpf_link_ops netkit_link_lops = { + .release = netkit_link_release, + .detach = netkit_link_detach, + .dealloc = netkit_link_dealloc, + .update_prog = netkit_link_update, + .show_fdinfo = netkit_link_fdinfo, + .fill_link_info = netkit_link_fill_info, +}; + +static int netkit_link_init(struct netkit_link *nkl, + struct bpf_link_primer *link_primer, + const union bpf_attr *attr, + struct net_device *dev, + struct bpf_prog *prog) +{ + bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT, + &netkit_link_lops, prog); + nkl->location = attr->link_create.attach_type; + nkl->dev = dev; + return bpf_link_prime(&nkl->link, link_primer); +} + +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct netkit_link *nkl; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->link_create.target_ifindex, + attr->link_create.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT); + if (!nkl) { + ret = -ENOMEM; + goto out; + } + ret = netkit_link_init(nkl, &link_primer, attr, dev, prog); + if (ret) { + kfree(nkl); + goto out; + } + ret = netkit_link_prog_attach(&nkl->link, + attr->link_create.flags, + attr->link_create.netkit.relative_fd, + attr->link_create.netkit.expected_revision); + if (ret) { + nkl->dev = NULL; + bpf_link_cleanup(&link_primer); + goto out; + } + ret = bpf_link_settle(&link_primer); +out: + rtnl_unlock(); + return ret; +} + +static void netkit_release_all(struct net_device *dev) +{ + struct bpf_mprog_entry *entry; + struct bpf_tuple tuple = {}; + struct bpf_mprog_fp *fp; + struct bpf_mprog_cp *cp; + + entry = netkit_entry_fetch(dev, false); + if (!entry) + return; + netkit_entry_update(dev, NULL); + netkit_entry_sync(); + bpf_mprog_foreach_tuple(entry, fp, cp, tuple) { + if (tuple.link) + netkit_link(tuple.link)->dev = NULL; + else + bpf_prog_put(tuple.prog); + } +} + +static void netkit_uninit(struct net_device *dev) +{ + netkit_release_all(dev); +} + +static void netkit_del_link(struct net_device *dev, struct list_head *head) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + RCU_INIT_POINTER(nk->peer, NULL); + unregister_netdevice_queue(dev, head); + if (peer) { + nk = netkit_priv(peer); + RCU_INIT_POINTER(nk->peer, 
NULL); + unregister_netdevice_queue(peer, head); + } +} + +static int netkit_change_link(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + enum netkit_action policy; + struct nlattr *attr; + int err; + + if (!nk->primary) { + NL_SET_ERR_MSG(extack, + "netkit link settings can be changed only through the primary device"); + return -EACCES; + } + + if (data[IFLA_NETKIT_MODE]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE], + "netkit link operating mode cannot be changed after device creation"); + return -EACCES; + } + + if (data[IFLA_NETKIT_PEER_INFO]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO], + "netkit peer info cannot be changed after device creation"); + return -EINVAL; + } + + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + policy = nla_get_u32(attr); + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + WRITE_ONCE(nk->policy, policy); + } + + if (data[IFLA_NETKIT_PEER_POLICY]) { + err = -EOPNOTSUPP; + attr = data[IFLA_NETKIT_PEER_POLICY]; + policy = nla_get_u32(attr); + if (peer) + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + nk = netkit_priv(peer); + WRITE_ONCE(nk->policy, policy); + } + + return 0; +} + +static size_t netkit_get_size(const struct net_device *dev) +{ + return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */ + nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */ + 0; +} + +static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode)) + return -EMSGSIZE; + + if (peer) { + nk = netkit_priv(peer); + if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy)) + return -EMSGSIZE; + } + + return 0; +} + +static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = { + [IFLA_NETKIT_PEER_INFO] = { .len = sizeof(struct ifinfomsg) }, + [IFLA_NETKIT_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_MODE] = { .type = NLA_U32 }, + [IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT, + .reject_message = "Primary attribute is read-only" }, +}; + +static struct rtnl_link_ops netkit_link_ops = { + .kind = DRV_NAME, + .priv_size = sizeof(struct netkit), + .setup = netkit_setup, + .newlink = netkit_new_link, + .dellink = netkit_del_link, + .changelink = netkit_change_link, + .get_link_net = netkit_get_link_net, + .get_size = netkit_get_size, + .fill_info = netkit_fill_info, + .policy = netkit_policy, + .validate = netkit_validate, + .maxtype = IFLA_NETKIT_MAX, +}; + +static __init int netkit_init(void) +{ + BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT || + (int)NETKIT_PASS != (int)TCX_PASS || + (int)NETKIT_DROP != (int)TCX_DROP || + (int)NETKIT_REDIRECT != (int)TCX_REDIRECT); + + return rtnl_link_register(&netkit_link_ops); +} + +static __exit void netkit_exit(void) +{ + rtnl_link_unregister(&netkit_link_ops); +} + +module_init(netkit_init); +module_exit(netkit_exit); + +MODULE_DESCRIPTION("BPF-programmable network device"); +MODULE_AUTHOR("Daniel 
Borkmann "); +MODULE_AUTHOR("Nikolay Aleksandrov "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 4dbc21f604..31f0beba63 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -1090,6 +1090,28 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs, return 0; } +static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs, + struct phylink_link_state *state) +{ + int ret; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_STS); + if (ret < 0) { + state->link = 0; + return ret; + } + + state->link = !!(ret & DW_VR_MII_MMD_STS_LINK_STS); + if (!state->link) + return 0; + + state->speed = SPEED_2500; + state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX; + state->duplex = DUPLEX_FULL; + + return 0; +} + static void xpcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { @@ -1127,6 +1149,13 @@ static void xpcs_get_state(struct phylink_pcs *pcs, ERR_PTR(ret)); } break; + case DW_2500BASEX: + ret = xpcs_get_state_2500basex(xpcs, state); + if (ret) { + pr_err("xpcs_get_state_2500basex returned %pe\n", + ERR_PTR(ret)); + } + break; default: return; } diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 39a90417e5..96c36b32ca 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -55,6 +55,8 @@ /* Clause 37 Defines */ /* VR MII MMD registers offsets */ #define DW_VR_MII_MMD_CTRL 0x0000 +#define DW_VR_MII_MMD_STS 0x0001 +#define DW_VR_MII_MMD_STS_LINK_STS BIT(2) #define DW_VR_MII_DIG_CTRL1 0x8000 #define DW_VR_MII_AN_CTRL 0x8001 #define DW_VR_MII_AN_INTR_STS 0x8002 diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 107880d13d..421d2b6291 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -69,9 +69,9 @@ config SFP comment "MII PHY device drivers" config AMD_PHY - tristate "AMD PHYs" + tristate "AMD and Altima PHYs" help - Currently supports the am79c874 + Currently supports the AMD am79c874 and Altima AC101L. 
config MESON_GXL_PHY tristate "Amlogic Meson GXL Internal PHY" diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 001bb6d8bf..930b15fa6c 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -13,6 +13,7 @@ #include #include +#define PHY_ID_AC101L 0x00225520 #define PHY_ID_AM79C874 0x0022561b #define MII_AM79C_IR 17 /* Interrupt Status/Control Register */ @@ -87,19 +88,31 @@ static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } -static struct phy_driver am79c_driver[] = { { - .phy_id = PHY_ID_AM79C874, - .name = "AM79C874", - .phy_id_mask = 0xfffffff0, - /* PHY_BASIC_FEATURES */ - .config_init = am79c_config_init, - .config_intr = am79c_config_intr, - .handle_interrupt = am79c_handle_interrupt, -} }; +static struct phy_driver am79c_drivers[] = { + { + .phy_id = PHY_ID_AM79C874, + .name = "AM79C874", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .config_init = am79c_config_init, + .config_intr = am79c_config_intr, + .handle_interrupt = am79c_handle_interrupt, + }, + { + .phy_id = PHY_ID_AC101L, + .name = "AC101L", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .config_init = am79c_config_init, + .config_intr = am79c_config_intr, + .handle_interrupt = am79c_handle_interrupt, + }, +}; -module_phy_driver(am79c_driver); +module_phy_driver(am79c_drivers); static struct mdio_device_id __maybe_unused amd_tbl[] = { + { PHY_ID_AC101L, 0xfffffff0 }, { PHY_ID_AM79C874, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 37fb033e1c..ef203b0807 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -2104,7 +2104,7 @@ static struct phy_driver at803x_driver[] = { .write_page = at803x_write_page, .get_features = at803x_get_features, .read_status = at803x_read_status, - .config_intr = &at803x_config_intr, + .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, @@ -2134,7 +2134,7 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, /* PHY_BASIC_FEATURES */ - .config_intr = &at803x_config_intr, + .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .cable_test_start = at803x_cable_test_start, .cable_test_get_status = at803x_cable_test_get_status, @@ -2150,7 +2150,7 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, /* PHY_BASIC_FEATURES */ - .config_intr = &at803x_config_intr, + .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .cable_test_start = at803x_cable_test_start, .cable_test_get_status = at803x_cable_test_get_status, diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c index 0f1e617a26..eb74a8cf8d 100644 --- a/drivers/net/phy/ax88796b.c +++ b/drivers/net/phy/ax88796b.c @@ -90,7 +90,7 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev) */ if (phydev->state == PHY_NOLINK) { phy_init_hw(phydev); - phy_start_aneg(phydev); + _phy_start_aneg(phydev); } } diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c index ef00d61630..cb4b91af5e 100644 --- a/drivers/net/phy/bcm-phy-ptp.c +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -942,3 +942,4 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev) EXPORT_SYMBOL_GPL(bcm_ptp_probe); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom PHY PTP driver"); diff --git a/drivers/net/phy/bcm87xx.c 
b/drivers/net/phy/bcm87xx.c index cc28581076..e81404bf89 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -223,3 +223,4 @@ static struct phy_driver bcm87xx_driver[] = { module_phy_driver(bcm87xx_driver); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Broadcom BCM87xx PHY driver"); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 04b2e6eeb1..3a62710567 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -704,16 +704,21 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0 && err != -EIO) return err; + /* Read to clear status bits */ reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; /* Unmask events we are interested in and mask interrupts globally. */ - reg = MII_BRCM_FET_IR_DUPLEX_EN | - MII_BRCM_FET_IR_SPEED_EN | - MII_BRCM_FET_IR_LINK_EN | - MII_BRCM_FET_IR_ENABLE | - MII_BRCM_FET_IR_MASK; + if (phydev->phy_id == PHY_ID_BCM5221) + reg = MII_BRCM_FET_IR_ENABLE | + MII_BRCM_FET_IR_MASK; + else + reg = MII_BRCM_FET_IR_DUPLEX_EN | + MII_BRCM_FET_IR_SPEED_EN | + MII_BRCM_FET_IR_LINK_EN | + MII_BRCM_FET_IR_ENABLE | + MII_BRCM_FET_IR_MASK; err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); if (err < 0) @@ -726,42 +731,49 @@ static int brcm_fet_config_init(struct phy_device *phydev) reg = brcmtest | MII_BRCM_FET_BT_SRE; - err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); - if (err < 0) - return err; + phy_lock_mdio_bus(phydev); - /* Set the LED mode */ - reg = phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); - if (reg < 0) { - err = reg; - goto done; + err = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); + if (err < 0) { + phy_unlock_mdio_bus(phydev); + return err; } - reg &= ~MII_BRCM_FET_SHDW_AM4_LED_MASK; - reg |= MII_BRCM_FET_SHDW_AM4_LED_MODE1; + if (phydev->phy_id != PHY_ID_BCM5221) { + /* Set the LED mode */ + reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); + if (reg < 0) { + err = reg; + goto done; + } - err = phy_write(phydev, MII_BRCM_FET_SHDW_AUXMODE4, reg); - if (err < 0) - goto done; + err = __phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4, + MII_BRCM_FET_SHDW_AM4_LED_MASK, + MII_BRCM_FET_SHDW_AM4_LED_MODE1); + if (err < 0) + goto done; - /* Enable auto MDIX */ - err = phy_set_bits(phydev, MII_BRCM_FET_SHDW_MISCCTRL, - MII_BRCM_FET_SHDW_MC_FAME); - if (err < 0) - goto done; + /* Enable auto MDIX */ + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_MISCCTRL, + MII_BRCM_FET_SHDW_MC_FAME); + if (err < 0) + goto done; + } if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) { /* Enable auto power down */ - err = phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, - MII_BRCM_FET_SHDW_AS2_APDE); + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, + MII_BRCM_FET_SHDW_AS2_APDE); } done: /* Disable shadow register access */ - err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); + err2 = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); if (!err) err = err2; + phy_unlock_mdio_bus(phydev); + return err; } @@ -840,23 +852,86 @@ static int brcm_fet_suspend(struct phy_device *phydev) reg = brcmtest | MII_BRCM_FET_BT_SRE; - err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); - if (err < 0) + phy_lock_mdio_bus(phydev); + + err = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); + if (err < 0) { + phy_unlock_mdio_bus(phydev); return err; + } + + if (phydev->phy_id == PHY_ID_BCM5221) + /* Force Low Power Mode with clock enabled */ + reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM; + else + /* Set standby mode */ + reg = MII_BRCM_FET_SHDW_AM4_STANDBY; - 
/* Set standby mode */ - err = phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4, - MII_BRCM_FET_SHDW_AM4_STANDBY, - MII_BRCM_FET_SHDW_AM4_STANDBY); + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXMODE4, reg); /* Disable shadow register access */ - err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); + err2 = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); if (!err) err = err2; + phy_unlock_mdio_bus(phydev); + return err; } +static int bcm5221_config_aneg(struct phy_device *phydev) +{ + int ret, val; + + ret = genphy_config_aneg(phydev); + if (ret) + return ret; + + switch (phydev->mdix_ctrl) { + case ETH_TP_MDI: + val = BCM5221_AEGSR_MDIX_DIS; + break; + case ETH_TP_MDI_X: + val = BCM5221_AEGSR_MDIX_DIS | BCM5221_AEGSR_MDIX_MAN_SWAP; + break; + case ETH_TP_MDI_AUTO: + val = 0; + break; + default: + return 0; + } + + return phy_modify(phydev, BCM5221_AEGSR, BCM5221_AEGSR_MDIX_MAN_SWAP | + BCM5221_AEGSR_MDIX_DIS, + val); +} + +static int bcm5221_read_status(struct phy_device *phydev) +{ + int ret; + + /* Read MDIX status */ + ret = phy_read(phydev, BCM5221_AEGSR); + if (ret < 0) + return ret; + + if (ret & BCM5221_AEGSR_MDIX_DIS) { + if (ret & BCM5221_AEGSR_MDIX_MAN_SWAP) + phydev->mdix_ctrl = ETH_TP_MDI_X; + else + phydev->mdix_ctrl = ETH_TP_MDI; + } else { + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + } + + if (ret & BCM5221_AEGSR_MDIX_STATUS) + phydev->mdix = ETH_TP_MDI_X; + else + phydev->mdix = ETH_TP_MDI; + + return genphy_read_status(phydev); +} + static void bcm54xx_phy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { @@ -1221,6 +1296,18 @@ static struct phy_driver broadcom_drivers[] = { .handle_interrupt = brcm_fet_handle_interrupt, .suspend = brcm_fet_suspend, .resume = brcm_fet_config_init, +}, { + .phy_id = PHY_ID_BCM5221, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM5221", + /* PHY_BASIC_FEATURES */ + .config_init = brcm_fet_config_init, + .config_intr = brcm_fet_config_intr, + .handle_interrupt = brcm_fet_handle_interrupt, + .suspend = brcm_fet_suspend, + .resume = brcm_fet_config_init, + .config_aneg = bcm5221_config_aneg, + .read_status = bcm5221_read_status, }, { .phy_id = PHY_ID_BCM5395, .phy_id_mask = 0xfffffff0, @@ -1296,6 +1383,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM50610M, 0xfffffff0 }, { PHY_ID_BCM57780, 0xfffffff0 }, { PHY_ID_BCMAC131, 0xfffffff0 }, + { PHY_ID_BCM5221, 0xfffffff0 }, { PHY_ID_BCM5241, 0xfffffff0 }, { PHY_ID_BCM5395, 0xfffffff0 }, { PHY_ID_BCM53125, 0xfffffff0 }, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index e397e7d642..5f08f9d38b 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -159,6 +159,23 @@ #define DP83867_LED_DRV_EN(x) BIT((x) * 4) #define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1) +#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4)) +#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4)) +#define DP83867_LED_FN_RX_ERR 0xe /* Receive Error */ +#define DP83867_LED_FN_RX_TX_ERR 0xd /* Receive Error or Transmit Error */ +#define DP83867_LED_FN_LINK_RX_TX 0xb /* Link established, blink for rx or tx activity */ +#define DP83867_LED_FN_FULL_DUPLEX 0xa /* Full duplex */ +#define DP83867_LED_FN_LINK_100_1000_BT 0x9 /* 100/1000BT link established */ +#define DP83867_LED_FN_LINK_10_100_BT 0x8 /* 10/100BT link established */ +#define DP83867_LED_FN_LINK_10_BT 0x7 /* 10BT link established */ +#define DP83867_LED_FN_LINK_100_BTX 0x6 /* 100 BTX link established */ +#define DP83867_LED_FN_LINK_1000_BT 0x5 /* 1000 BT link 
established */ +#define DP83867_LED_FN_COLLISION 0x4 /* Collision detected */ +#define DP83867_LED_FN_RX 0x3 /* Receive activity */ +#define DP83867_LED_FN_TX 0x2 /* Transmit activity */ +#define DP83867_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */ +#define DP83867_LED_FN_LINK 0x0 /* Link established */ + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -1018,6 +1035,123 @@ dp83867_led_brightness_set(struct phy_device *phydev, val); } +static int dp83867_led_mode(u8 index, unsigned long rules) +{ + if (index >= DP83867_LED_COUNT) + return -EINVAL; + + switch (rules) { + case BIT(TRIGGER_NETDEV_LINK): + return DP83867_LED_FN_LINK; + case BIT(TRIGGER_NETDEV_LINK_10): + return DP83867_LED_FN_LINK_10_BT; + case BIT(TRIGGER_NETDEV_LINK_100): + return DP83867_LED_FN_LINK_100_BTX; + case BIT(TRIGGER_NETDEV_FULL_DUPLEX): + return DP83867_LED_FN_FULL_DUPLEX; + case BIT(TRIGGER_NETDEV_TX): + return DP83867_LED_FN_TX; + case BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_RX; + case BIT(TRIGGER_NETDEV_LINK_1000): + return DP83867_LED_FN_LINK_1000_BT; + case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_RX_TX; + case BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000): + return DP83867_LED_FN_LINK_100_1000_BT; + case BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100): + return DP83867_LED_FN_LINK_10_100_BT; + case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_LINK_RX_TX; + default: + return -EOPNOTSUPP; + } +} + +static int dp83867_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int ret; + + ret = dp83867_led_mode(index, rules); + if (ret < 0) + return ret; + + return 0; +} + +static int dp83867_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode, ret; + + mode = dp83867_led_mode(index, rules); + if (mode < 0) + return mode; + + ret = phy_modify(phydev, DP83867_LEDCR1, DP83867_LED_FN_MASK(index), + DP83867_LED_FN(index, mode)); + if (ret) + return ret; + + return phy_modify(phydev, DP83867_LEDCR2, DP83867_LED_DRV_EN(index), 0); +} + +static int dp83867_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + val = phy_read(phydev, DP83867_LEDCR1); + if (val < 0) + return val; + + val &= DP83867_LED_FN_MASK(index); + val >>= index * 4; + + switch (val) { + case DP83867_LED_FN_LINK: + *rules = BIT(TRIGGER_NETDEV_LINK); + break; + case DP83867_LED_FN_LINK_10_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_10); + break; + case DP83867_LED_FN_LINK_100_BTX: + *rules = BIT(TRIGGER_NETDEV_LINK_100); + break; + case DP83867_LED_FN_FULL_DUPLEX: + *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX); + break; + case DP83867_LED_FN_TX: + *rules = BIT(TRIGGER_NETDEV_TX); + break; + case DP83867_LED_FN_RX: + *rules = BIT(TRIGGER_NETDEV_RX); + break; + case DP83867_LED_FN_LINK_1000_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_1000); + break; + case DP83867_LED_FN_RX_TX: + *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + break; + case DP83867_LED_FN_LINK_100_1000_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000); + break; + case DP83867_LED_FN_LINK_10_100_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100); + break; + case DP83867_LED_FN_LINK_RX_TX: + *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX); + break; + default: + *rules = 0; + break; + } + + return 0; +} + static struct phy_driver dp83867_driver[] = { { 
.phy_id = DP83867_PHY_ID, @@ -1047,6 +1181,9 @@ static struct phy_driver dp83867_driver[] = { .set_loopback = dp83867_loopback, .led_brightness_set = dp83867_led_brightness_set, + .led_hw_is_supported = dp83867_led_hw_is_supported, + .led_hw_control_set = dp83867_led_hw_control_set, + .led_hw_control_get = dp83867_led_hw_control_get, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c index 8a20d9889f..0f3a1538a8 100644 --- a/drivers/net/phy/mediatek-ge-soc.c +++ b/drivers/net/phy/mediatek-ge-soc.c @@ -489,7 +489,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val, u16 reg, val; if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988) - bias = -2; + bias = -1; val = clamp_val(bias + tx_r50_cal_val, 0, 63); @@ -705,6 +705,11 @@ restore: static void mt798x_phy_common_finetune(struct phy_device *phydev) { phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); + /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */ + __phy_write(phydev, 0x11, 0xc71); + __phy_write(phydev, 0x12, 0xc); + __phy_write(phydev, 0x10, 0x8fae); + /* EnabRandUpdTrig = 1 */ __phy_write(phydev, 0x11, 0x2f00); __phy_write(phydev, 0x12, 0xe); @@ -715,15 +720,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x83aa); - /* TrFreeze = 0 */ + /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */ + __phy_write(phydev, 0x11, 0x240); + __phy_write(phydev, 0x12, 0x0); + __phy_write(phydev, 0x10, 0x9680); + + /* TrFreeze = 0 (mt7988 default) */ __phy_write(phydev, 0x11, 0x0); __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x9686); + /* SSTrKp100 = 5 */ + /* SSTrKf100 = 6 */ + /* SSTrKp1000Mas = 5 */ + /* SSTrKf1000Mas = 6 */ /* SSTrKp1000Slv = 5 */ + /* SSTrKf1000Slv = 6 */ __phy_write(phydev, 0x11, 0xbaef); __phy_write(phydev, 0x12, 0x2e); __phy_write(phydev, 0x10, 0x968c); + phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); +} + +static void mt7981_phy_finetune(struct phy_device *phydev) +{ + u16 val[8] = { 0x01ce, 0x01c1, + 0x020f, 0x0202, + 0x03d0, 0x03c0, + 0x0013, 0x0005 }; + int i, k; + + /* 100M eye finetune: + * Keep middle level of TX MLT3 shapper as default. + * Only change TX MLT3 overshoot level here. 
+ */ + for (k = 0, i = 1; i < 12; i++) { + if (i % 3 == 0) + continue; + phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]); + } + + phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); + /* ResetSyncOffset = 6 */ + __phy_write(phydev, 0x11, 0x600); + __phy_write(phydev, 0x12, 0x0); + __phy_write(phydev, 0x10, 0x8fc0); + + /* VgaDecRate = 1 */ + __phy_write(phydev, 0x11, 0x4c2a); + __phy_write(phydev, 0x12, 0x3e); + __phy_write(phydev, 0x10, 0x8fa4); /* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2, * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2 @@ -738,7 +784,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) __phy_write(phydev, 0x10, 0x8ec0); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); - /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/ + /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9)); @@ -771,48 +817,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222); } -static void mt7981_phy_finetune(struct phy_device *phydev) -{ - u16 val[8] = { 0x01ce, 0x01c1, - 0x020f, 0x0202, - 0x03d0, 0x03c0, - 0x0013, 0x0005 }; - int i, k; - - /* 100M eye finetune: - * Keep middle level of TX MLT3 shapper as default. - * Only change TX MLT3 overshoot level here. - */ - for (k = 0, i = 1; i < 12; i++) { - if (i % 3 == 0) - continue; - phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]); - } - - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */ - __phy_write(phydev, 0x11, 0xc71); - __phy_write(phydev, 0x12, 0xc); - __phy_write(phydev, 0x10, 0x8fae); - - /* ResetSyncOffset = 6 */ - __phy_write(phydev, 0x11, 0x600); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x8fc0); - - /* VgaDecRate = 1 */ - __phy_write(phydev, 0x11, 0x4c2a); - __phy_write(phydev, 0x12, 0x3e); - __phy_write(phydev, 0x10, 0x8fa4); - - /* FfeUpdGainForce = 4 */ - __phy_write(phydev, 0x11, 0x240); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x9680); - - phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); -} - static void mt7988_phy_finetune(struct phy_device *phydev) { u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182, @@ -827,17 +831,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev) /* TCT finetune */ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5); - /* Disable TX power saving */ - phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7, - MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8); - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - - /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */ - __phy_write(phydev, 0x11, 0x671); - __phy_write(phydev, 0x12, 0xc); - __phy_write(phydev, 0x10, 0x8fae); - /* ResetSyncOffset = 5 */ __phy_write(phydev, 0x11, 0x500); __phy_write(phydev, 0x12, 0x0); @@ -845,13 +839,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev) /* VgaDecRate is 1 at default on mt7988 */ - phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); + /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7, + * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7 + */ + __phy_write(phydev, 0x11, 0xb90a); + __phy_write(phydev, 0x12, 0x6f); + __phy_write(phydev, 0x10, 0x8f82); + + /* RemAckCntLimitCtrl = 1 */ + __phy_write(phydev, 0x11, 0xfbba); + __phy_write(phydev, 0x12, 0xc3); + __phy_write(phydev, 0x10, 0x87f8); - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30); - /* 
TxClkOffset = 2 */ - __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK, - FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2)); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); + + /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */ + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234, + MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK, + BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa)); + + /* rg_tr_lpf_cnt_val = 1023 */ + phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff); } static void mt798x_phy_eee(struct phy_device *phydev) @@ -884,11 +892,11 @@ static void mt798x_phy_eee(struct phy_device *phydev) MTK_PHY_LPI_SLV_SEND_TX_EN, FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120)); - phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239, - MTK_PHY_LPI_SEND_LOC_TIMER_MASK | - MTK_PHY_LPI_TXPCS_LOC_RCV, - FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117)); + /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */ + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239, + MTK_PHY_LPI_TXPCS_LOC_RCV); + /* This also fixes some IoT issues, such as CH340 */ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7, MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK, FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) | @@ -922,7 +930,7 @@ static void mt798x_phy_eee(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x9690); - /* REG_EEE_st2TrKf1000 = 3 */ + /* REG_EEE_st2TrKf1000 = 2 */ __phy_write(phydev, 0x11, 0x114f); __phy_write(phydev, 0x12, 0x2); __phy_write(phydev, 0x10, 0x969a); @@ -947,7 +955,7 @@ static void mt798x_phy_eee(struct phy_device *phydev) __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x96b8); - /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */ + /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */ __phy_write(phydev, 0x11, 0x1463); __phy_write(phydev, 0x12, 0x0); __phy_write(phydev, 0x10, 0x96ca); @@ -1459,6 +1467,13 @@ static int mt7988_phy_probe(struct phy_device *phydev) if (err) return err; + /* Disable TX power saving at probing to: + * 1. Meet common mode compliance test criteria + * 2. Make sure that TX-VCM calibration works fine + */ + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7, + MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8); + return mt798x_phy_calibration(phydev); } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 27ca25bbd1..ca2db4adcb 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1738,6 +1738,28 @@ static int ksz886x_config_aneg(struct phy_device *phydev) if (ret) return ret; + if (phydev->autoneg != AUTONEG_ENABLE) { + /* When autonegotation is disabled, we need to manually force + * the link state. If we don't do this, the PHY will keep + * sending Fast Link Pulses (FLPs) which are part of the + * autonegotiation process. This is not desired when + * autonegotiation is off. + */ + ret = phy_set_bits(phydev, MII_KSZPHY_CTRL, + KSZ886X_CTRL_FORCE_LINK); + if (ret) + return ret; + } else { + /* If we had previously forced the link state, we need to + * clear KSZ886X_CTRL_FORCE_LINK bit now. Otherwise, the PHY + * will not perform autonegotiation. + */ + ret = phy_clear_bits(phydev, MII_KSZPHY_CTRL, + KSZ886X_CTRL_FORCE_LINK); + if (ret) + return ret; + } + /* The MDI-X configuration is automatically changed by the PHY after * switching from autoneg off to on. So, take MDI-X configuration under * own control and set it after autoneg configuration was done. 
@@ -3628,12 +3650,8 @@ static int lan8841_ts_info(struct mii_timestamper *mii_ts, info->phc_index = ptp_priv->ptp_clock ? ptp_clock_index(ptp_priv->ptp_clock) : -1; - if (info->phc_index == -1) { - info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; + if (info->phc_index == -1) return 0; - } info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index b13e15310f..a713999651 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -414,10 +414,8 @@ static void tja11xx_get_strings(struct phy_device *phydev, u8 *data) { int i; - for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) { - strncpy(data + i * ETH_GSTRING_LEN, - tja11xx_hw_stats[i].string, ETH_GSTRING_LEN); - } + for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) + ethtool_sprintf(&data, "%s", tja11xx_hw_stats[i].string); } static void tja11xx_get_stats(struct phy_device *phydev, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index df54c137c5..a5fa077650 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -981,7 +981,7 @@ static int phy_check_link_status(struct phy_device *phydev) * If the PHYCONTROL Layer is operating, we change the state to * reflect the beginning of Auto-negotiation or forcing. */ -static int _phy_start_aneg(struct phy_device *phydev) +int _phy_start_aneg(struct phy_device *phydev) { int err; @@ -1002,6 +1002,7 @@ static int _phy_start_aneg(struct phy_device *phydev) return err; } +EXPORT_SYMBOL(_phy_start_aneg); /** * phy_start_aneg - start auto-negotiation for this PHY device @@ -1231,9 +1232,7 @@ static void phy_error_precise(struct phy_device *phydev, const void *func, int err) { WARN(1, "%pS: returned: %d\n", func, err); - mutex_lock(&phydev->lock); phy_process_error(phydev); - mutex_unlock(&phydev->lock); } /** @@ -1355,6 +1354,113 @@ void phy_free_interrupt(struct phy_device *phydev) } EXPORT_SYMBOL(phy_free_interrupt); +enum phy_state_work { + PHY_STATE_WORK_NONE, + PHY_STATE_WORK_ANEG, + PHY_STATE_WORK_SUSPEND, +}; + +static enum phy_state_work _phy_state_machine(struct phy_device *phydev) +{ + enum phy_state_work state_work = PHY_STATE_WORK_NONE; + struct net_device *dev = phydev->attached_dev; + enum phy_state old_state = phydev->state; + const void *func = NULL; + bool finished = false; + int err = 0; + + switch (phydev->state) { + case PHY_DOWN: + case PHY_READY: + break; + case PHY_UP: + state_work = PHY_STATE_WORK_ANEG; + break; + case PHY_NOLINK: + case PHY_RUNNING: + err = phy_check_link_status(phydev); + func = &phy_check_link_status; + break; + case PHY_CABLETEST: + err = phydev->drv->cable_test_get_status(phydev, &finished); + if (err) { + phy_abort_cable_test(phydev); + netif_testing_off(dev); + state_work = PHY_STATE_WORK_ANEG; + phydev->state = PHY_UP; + break; + } + + if (finished) { + ethnl_cable_test_finished(phydev); + netif_testing_off(dev); + state_work = PHY_STATE_WORK_ANEG; + phydev->state = PHY_UP; + } + break; + case PHY_HALTED: + case PHY_ERROR: + if (phydev->link) { + phydev->link = 0; + phy_link_down(phydev); + } + state_work = PHY_STATE_WORK_SUSPEND; + break; + } + + if (state_work == PHY_STATE_WORK_ANEG) { + err = _phy_start_aneg(phydev); + func = &_phy_start_aneg; + } + + if (err == -ENODEV) + return state_work; + + if (err < 0) + phy_error_precise(phydev, func, err); + + phy_process_state_change(phydev, old_state); + + /* Only re-schedule a PHY state 
machine change if we are polling the + * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving + * between states from phy_mac_interrupt(). + * + * In state PHY_HALTED the PHY gets suspended, so rescheduling the + * state machine would be pointless and possibly error prone when + * called from phy_disconnect() synchronously. + */ + if (phy_polling_mode(phydev) && phy_is_started(phydev)) + phy_queue_state_machine(phydev, PHY_STATE_TIME); + + return state_work; +} + +/* unlocked part of the PHY state machine */ +static void _phy_state_machine_post_work(struct phy_device *phydev, + enum phy_state_work state_work) +{ + if (state_work == PHY_STATE_WORK_SUSPEND) + phy_suspend(phydev); +} + +/** + * phy_state_machine - Handle the state machine + * @work: work_struct that describes the work to be done + */ +void phy_state_machine(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct phy_device *phydev = + container_of(dwork, struct phy_device, state_queue); + enum phy_state_work state_work; + + mutex_lock(&phydev->lock); + state_work = _phy_state_machine(phydev); + mutex_unlock(&phydev->lock); + + _phy_state_machine_post_work(phydev, state_work); +} + /** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct @@ -1362,6 +1468,7 @@ EXPORT_SYMBOL(phy_free_interrupt); void phy_stop(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; + enum phy_state_work state_work; enum phy_state old_state; if (!phy_is_started(phydev) && phydev->state != PHY_DOWN && @@ -1385,9 +1492,10 @@ void phy_stop(struct phy_device *phydev) phydev->state = PHY_HALTED; phy_process_state_change(phydev, old_state); + state_work = _phy_state_machine(phydev); mutex_unlock(&phydev->lock); - phy_state_machine(&phydev->state_queue.work); + _phy_state_machine_post_work(phydev, state_work); phy_stop_machine(phydev); /* Cannot call flush_scheduled_work() here as desired because @@ -1431,97 +1539,6 @@ out: } EXPORT_SYMBOL(phy_start); -/** - * phy_state_machine - Handle the state machine - * @work: work_struct that describes the work to be done - */ -void phy_state_machine(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct phy_device *phydev = - container_of(dwork, struct phy_device, state_queue); - struct net_device *dev = phydev->attached_dev; - bool needs_aneg = false, do_suspend = false; - enum phy_state old_state; - const void *func = NULL; - bool finished = false; - int err = 0; - - mutex_lock(&phydev->lock); - - old_state = phydev->state; - - switch (phydev->state) { - case PHY_DOWN: - case PHY_READY: - break; - case PHY_UP: - needs_aneg = true; - - break; - case PHY_NOLINK: - case PHY_RUNNING: - err = phy_check_link_status(phydev); - func = &phy_check_link_status; - break; - case PHY_CABLETEST: - err = phydev->drv->cable_test_get_status(phydev, &finished); - if (err) { - phy_abort_cable_test(phydev); - netif_testing_off(dev); - needs_aneg = true; - phydev->state = PHY_UP; - break; - } - - if (finished) { - ethnl_cable_test_finished(phydev); - netif_testing_off(dev); - needs_aneg = true; - phydev->state = PHY_UP; - } - break; - case PHY_HALTED: - case PHY_ERROR: - if (phydev->link) { - phydev->link = 0; - phy_link_down(phydev); - } - do_suspend = true; - break; - } - - mutex_unlock(&phydev->lock); - - if (needs_aneg) { - err = phy_start_aneg(phydev); - func = &phy_start_aneg; - } else if (do_suspend) { - phy_suspend(phydev); - } - - if (err == -ENODEV) - return; - - if (err < 0) - 
phy_error_precise(phydev, func, err); - - phy_process_state_change(phydev, old_state); - - /* Only re-schedule a PHY state machine change if we are polling the - * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving - * between states from phy_mac_interrupt(). - * - * In state PHY_HALTED the PHY gets suspended, so rescheduling the - * state machine would be pointless and possibly error prone when - * called from phy_disconnect() synchronously. - */ - mutex_lock(&phydev->lock); - if (phy_polling_mode(phydev) && phy_is_started(phydev)) - phy_queue_state_machine(phydev, PHY_STATE_TIME); - mutex_unlock(&phydev->lock); -} - /** * phy_mac_interrupt - MAC says the link has changed * @phydev: phy_device struct with changed link diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b5f012619e..25c19496a3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -257,7 +257,8 @@ static int phylink_interface_max_speed(phy_interface_t interface) * Set all possible pause, speed and duplex linkmodes in @linkmodes that are * supported by the @caps. @linkmodes must have been initialised previously. */ -void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) +static void phylink_caps_to_linkmodes(unsigned long *linkmodes, + unsigned long caps) { if (caps & MAC_SYM_PAUSE) __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes); @@ -400,7 +401,6 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes); } } -EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes); static struct { unsigned long mask; @@ -477,9 +477,9 @@ static unsigned long phylink_cap_from_speed_duplex(int speed, * Get the MAC capabilities that are supported by the @interface mode and * @mac_capabilities. */ -unsigned long phylink_get_capabilities(phy_interface_t interface, - unsigned long mac_capabilities, - int rate_matching) +static unsigned long phylink_get_capabilities(phy_interface_t interface, + unsigned long mac_capabilities, + int rate_matching) { int max_speed = phylink_interface_max_speed(interface); unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; @@ -606,7 +606,6 @@ unsigned long phylink_get_capabilities(phy_interface_t interface, return (caps & mac_capabilities) | matched_caps; } -EXPORT_SYMBOL_GPL(phylink_get_capabilities); /** * phylink_validate_mask_caps() - Restrict link modes based on caps @@ -618,9 +617,9 @@ EXPORT_SYMBOL_GPL(phylink_get_capabilities); * @supported and @state based on that. Use this function if your capabiliies * aren't constant, such as if they vary depending on the interface. */ -void phylink_validate_mask_caps(unsigned long *supported, - struct phylink_link_state *state, - unsigned long mac_capabilities) +static void phylink_validate_mask_caps(unsigned long *supported, + struct phylink_link_state *state, + unsigned long mac_capabilities) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; unsigned long caps; @@ -634,29 +633,12 @@ void phylink_validate_mask_caps(unsigned long *supported, linkmode_and(supported, supported, mask); linkmode_and(state->advertising, state->advertising, mask); } -EXPORT_SYMBOL_GPL(phylink_validate_mask_caps); - -/** - * phylink_generic_validate() - generic validate() callback implementation - * @config: a pointer to a &struct phylink_config. - * @supported: ethtool bitmask for supported link modes. - * @state: a pointer to a &struct phylink_link_state. 
- * - * Generic implementation of the validate() callback that MAC drivers can - * use when they pass the range of supported interfaces and MAC capabilities. - */ -void phylink_generic_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - phylink_validate_mask_caps(supported, state, config->mac_capabilities); -} -EXPORT_SYMBOL_GPL(phylink_generic_validate); static int phylink_validate_mac_and_pcs(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + unsigned long capabilities; struct phylink_pcs *pcs; int ret; @@ -696,10 +678,13 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, } /* Then validate the link parameters with the MAC */ - if (pl->mac_ops->validate) - pl->mac_ops->validate(pl->config, supported, state); + if (pl->mac_ops->mac_get_caps) + capabilities = pl->mac_ops->mac_get_caps(pl->config, + state->interface); else - phylink_generic_validate(pl->config, supported, state); + capabilities = pl->config->mac_capabilities; + + phylink_validate_mask_caps(supported, state, capabilities); return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; } @@ -3742,3 +3727,4 @@ static int __init phylink_init(void) module_init(phylink_init); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("phylink models the MAC to optional PHY connection"); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 894172a3e1..337899c697 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev) ERR_PTR(ret)); return ret; } + + return genphy_soft_reset(phydev); } - return genphy_soft_reset(phydev); + return 0; } static int rtl821x_suspend(struct phy_device *phydev) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 3679a43f4e..5468bd209f 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -257,6 +257,7 @@ struct sfp { unsigned int state_hw_drive; unsigned int state_hw_mask; unsigned int state_soft_mask; + unsigned int state_ignore_mask; unsigned int state; struct delayed_work poll; @@ -280,7 +281,6 @@ struct sfp { unsigned int rs_state_mask; bool have_a2; - bool tx_fault_ignore; const struct sfp_quirk *quirk; @@ -345,9 +345,24 @@ static void sfp_fixup_long_startup(struct sfp *sfp) sfp->module_t_start_up = T_START_UP_BAD_GPON; } +static void sfp_fixup_ignore_los(struct sfp *sfp) +{ + /* This forces LOS to zero, so we ignore transitions */ + sfp->state_ignore_mask |= SFP_F_LOS; + /* Make sure that LOS options are clear */ + sfp->id.ext.options &= ~cpu_to_be16(SFP_OPTIONS_LOS_INVERTED | + SFP_OPTIONS_LOS_NORMAL); +} + static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) { - sfp->tx_fault_ignore = true; + sfp->state_ignore_mask |= SFP_F_TX_FAULT; +} + +static void sfp_fixup_nokia(struct sfp *sfp) +{ + sfp_fixup_long_startup(sfp); + sfp_fixup_ignore_los(sfp); } // For 10GBASE-T short-reach modules @@ -446,7 +461,7 @@ static const struct sfp_quirk sfp_quirks[] = { // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd // NRZ in their EEPROM SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, - sfp_fixup_long_startup), + sfp_fixup_nokia), // Fiberstore SFP-10G-T doesn't identify as copper, and uses the // Rollball protocol to talk to the PHY. 
@@ -792,7 +807,8 @@ static void sfp_soft_start_poll(struct sfp *sfp) mutex_lock(&sfp->st_mutex); // Poll the soft state for hardware pins we want to ignore - sfp->state_soft_mask = ~sfp->state_hw_mask & mask; + sfp->state_soft_mask = ~sfp->state_hw_mask & ~sfp->state_ignore_mask & + mask; if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) && !sfp->need_poll) @@ -2317,7 +2333,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) sfp->module_t_start_up = T_START_UP; sfp->module_t_wait = T_WAIT; - sfp->tx_fault_ignore = false; + sfp->state_ignore_mask = 0; if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI || sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR || @@ -2340,6 +2356,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) if (sfp->quirk && sfp->quirk->fixup) sfp->quirk->fixup(sfp); + + sfp->state_hw_mask &= ~sfp->state_ignore_mask; mutex_unlock(&sfp->st_mutex); return 0; @@ -2841,10 +2859,7 @@ static void sfp_check_state(struct sfp *sfp) mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; - if (sfp->tx_fault_ignore) - changed &= SFP_F_PRESENT | SFP_F_LOS; - else - changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; for (i = 0; i < GPIO_MAX; i++) if (changed & BIT(i)) @@ -3138,3 +3153,4 @@ module_exit(sfp_exit); MODULE_ALIAS("platform:sfp"); MODULE_AUTHOR("Russell King"); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SFP cage support"); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c88edb19d2..1c7306a1af 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -507,10 +507,8 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data) { int i; - for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) { - strncpy(data + i * ETH_GSTRING_LEN, - smsc_hw_stats[i].string, ETH_GSTRING_LEN); - } + for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) + ethtool_sprintf(&data, "%s", smsc_hw_stats[i].string); } static u64 smsc_get_stat(struct phy_device *phydev, int i) diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index fbaaa8c102..e94a4b08fd 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -460,6 +460,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, p)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a9beacd552..0193af2d31 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -570,8 +570,8 @@ static struct bpf_prog *get_filter(struct sock_fprog *uprog) /* uprog->len is unsigned short, so no overflow here */ fprog.len = uprog->len; - fprog.filter = memdup_user(uprog->filter, - uprog->len * sizeof(struct sock_filter)); + fprog.filter = memdup_array_user(uprog->filter, + uprog->len, sizeof(struct sock_filter)); if (IS_ERR(fprog.filter)) return ERR_CAST(fprog.filter); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ba8b6bd823..8e7238e97d 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -877,7 +877,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, skb->dev = dev; - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr)); diff --git a/drivers/net/sungem_phy.c 
b/drivers/net/sungem_phy.c index 36803d932d..d591e33268 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -1194,4 +1194,5 @@ fail: } EXPORT_SYMBOL(sungem_phy_probe); +MODULE_DESCRIPTION("PHY drivers for the sungem Ethernet MAC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 5c01cc7b99..9f0495e8df 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1399,6 +1399,7 @@ void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) } EXPORT_SYMBOL_GPL(tap_destroy_cdev); +MODULE_DESCRIPTION("Common library for drivers implementing the TAP interface"); MODULE_AUTHOR("Arnd Bergmann "); MODULE_AUTHOR("Sainath Grandhi "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 5a1bf42ce1..d837c18874 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1315,8 +1315,6 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) netif_set_tso_max_size(dev->net, 16384); - ax88179_reset(dev); - return 0; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 59cde06aa7..5add4145d9 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1758,7 +1758,7 @@ static void lan78xx_get_drvinfo(struct net_device *net, { struct lan78xx_net *dev = netdev_priv(net); - strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 127b34dcc5..9bf2140fd0 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2584,8 +2584,9 @@ static int rx_bottom(struct r8152 *tp, int budget) while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; - unsigned int pkt_len, rx_frag_head_sz; + unsigned int pkt_len, rx_frag_head_sz, len; struct sk_buff *skb; + bool use_frags; WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000); @@ -2598,45 +2599,77 @@ static int rx_bottom(struct r8152 *tp, int budget) break; pkt_len -= ETH_FCS_LEN; + len = pkt_len; rx_data += sizeof(struct rx_desc); - if (!agg_free || tp->rx_copybreak > pkt_len) - rx_frag_head_sz = pkt_len; + if (!agg_free || tp->rx_copybreak > len) + use_frags = false; else - rx_frag_head_sz = tp->rx_copybreak; + use_frags = true; + + if (use_frags) { + /* If the budget is exhausted, the packet + * would be queued in the driver. That is, + * napi_gro_frags() wouldn't be called, so + * we couldn't use napi_get_frags(). 
+ */ + if (work_done >= budget) { + rx_frag_head_sz = tp->rx_copybreak; + skb = napi_alloc_skb(napi, + rx_frag_head_sz); + } else { + rx_frag_head_sz = 0; + skb = napi_get_frags(napi); + } + } else { + rx_frag_head_sz = 0; + skb = napi_alloc_skb(napi, len); + } - skb = napi_alloc_skb(napi, rx_frag_head_sz); if (!skb) { stats->rx_dropped++; goto find_next_rx; } skb->ip_summed = r8152_rx_csum(tp, rx_desc); - memcpy(skb->data, rx_data, rx_frag_head_sz); - skb_put(skb, rx_frag_head_sz); - pkt_len -= rx_frag_head_sz; - rx_data += rx_frag_head_sz; - if (pkt_len) { + rtl_rx_vlan_tag(rx_desc, skb); + + if (use_frags) { + if (rx_frag_head_sz) { + memcpy(skb->data, rx_data, + rx_frag_head_sz); + skb_put(skb, rx_frag_head_sz); + len -= rx_frag_head_sz; + rx_data += rx_frag_head_sz; + skb->protocol = eth_type_trans(skb, + netdev); + } + skb_add_rx_frag(skb, 0, agg->page, agg_offset(agg, rx_data), - pkt_len, - SKB_DATA_ALIGN(pkt_len)); + len, SKB_DATA_ALIGN(len)); get_page(agg->page); + } else { + memcpy(skb->data, rx_data, len); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, netdev); } - skb->protocol = eth_type_trans(skb, netdev); - rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { + if (use_frags) + napi_gro_frags(napi); + else + napi_gro_receive(napi, skb); + work_done++; stats->rx_packets++; - stats->rx_bytes += skb->len; - napi_gro_receive(napi, skb); + stats->rx_bytes += pkt_len; } else { __skb_queue_tail(&tp->rx_queue, skb); } find_next_rx: - rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); + rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN); rx_desc = (struct rx_desc *)rx_data; len_used = agg_offset(agg, rx_data); len_used += sizeof(struct rx_desc); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index f5e19f3ef6..143bd4ab16 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -474,8 +474,8 @@ static void sr_get_drvinfo(struct net_device *net, { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); - strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); } static u32 sr_get_link(struct net_device *net) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 0f798bcbe2..977861c46b 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -729,10 +729,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, if (skb_shared(skb) || skb_head_is_locked(skb) || skb_shinfo(skb)->nr_frags || skb_headroom(skb) < XDP_PACKET_HEADROOM) { - u32 size, len, max_head_size, off; + u32 size, len, max_head_size, off, truesize, page_offset; struct sk_buff *nskb; struct page *page; int i, head_off; + void *va; /* We need a private copy of the skb and data buffers since * the ebpf program can modify it. 
We segment the original skb @@ -745,14 +746,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) goto drop; + size = min_t(u32, skb->len, max_head_size); + truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM; + /* Allocate skb head */ - page = page_pool_dev_alloc_pages(rq->page_pool); - if (!page) + va = page_pool_dev_alloc_va(rq->page_pool, &truesize); + if (!va) goto drop; - nskb = napi_build_skb(page_address(page), PAGE_SIZE); + nskb = napi_build_skb(va, truesize); if (!nskb) { - page_pool_put_full_page(rq->page_pool, page, true); + page_pool_free_va(rq->page_pool, va, true); goto drop; } @@ -760,7 +764,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, skb_copy_header(nskb, skb); skb_mark_for_recycle(nskb); - size = min_t(u32, skb->len, max_head_size); if (skb_copy_bits(skb, 0, nskb->data, size)) { consume_skb(nskb); goto drop; @@ -775,15 +778,20 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, len = skb->len - off; for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { - page = page_pool_dev_alloc_pages(rq->page_pool); + size = min_t(u32, len, PAGE_SIZE); + truesize = size; + + page = page_pool_dev_alloc(rq->page_pool, &page_offset, + &truesize); if (!page) { consume_skb(nskb); goto drop; } - size = min_t(u32, len, PAGE_SIZE); - skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE); - if (skb_copy_bits(skb, off, page_address(page), + skb_add_rx_frag(nskb, i, page, page_offset, size, + truesize); + if (skb_copy_bits(skb, off, + page_address(page) + page_offset, size)) { consume_skb(nskb); goto drop; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index deb2229ab4..1caf21fd50 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3366,7 +3366,7 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) { /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL - * feature is negotiated. + * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated. 
*/ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) return -EOPNOTSUPP; @@ -4096,10 +4096,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; - int ret = -ENOMEM; - int i, total_vqs; const char **names; + int ret = -ENOMEM; + int total_vqs; bool *ctx; + u16 i; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by @@ -4136,8 +4137,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; - sprintf(vi->rq[i].name, "input.%d", i); - sprintf(vi->sq[i].name, "output.%d", i); + sprintf(vi->rq[i].name, "input.%u", i); + sprintf(vi->sq[i].name, "output.%u", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; if (ctx) @@ -4480,13 +4481,6 @@ static int virtnet_probe(struct virtio_device *vdev) dev->xdp_features |= NETDEV_XDP_ACT_RX_SG; } - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { - vi->intr_coal_rx.max_usecs = 0; - vi->intr_coal_tx.max_usecs = 0; - vi->intr_coal_tx.max_packets = 0; - vi->intr_coal_rx.max_packets = 0; - } - if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) vi->has_rss_hash_report = true; @@ -4561,6 +4555,27 @@ static int virtnet_probe(struct virtio_device *vdev) if (err) goto free; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { + vi->intr_coal_rx.max_usecs = 0; + vi->intr_coal_tx.max_usecs = 0; + vi->intr_coal_rx.max_packets = 0; + + /* Keep the default values of the coalescing parameters + * aligned with the default napi_tx state. + */ + if (vi->sq[0].napi.weight) + vi->intr_coal_tx.max_packets = 1; + else + vi->intr_coal_tx.max_packets = 0; + } + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { + /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. 
*/ + for (i = 0; i < vi->max_queue_pairs; i++) + if (vi->sq[i].napi.weight) + vi->sq[i].intr_coal.max_packets = 1; + } + #ifdef CONFIG_SYSFS if (vi->mergeable_rx_bufs) dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index b90dccdc2d..bb95ce43cd 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1945,7 +1945,6 @@ static const struct ctl_table vrf_table[] = { /* set by the vrf_netns_init */ .extra1 = NULL, }, - { }, }; static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 5b5597073b..412c3c0b69 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2215,114 +2215,6 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, return 0; } -static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, - struct vxlan_sock *sock4, - struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, - __u8 flow_flags, struct dst_cache *dst_cache, - const struct ip_tunnel_info *info) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct rtable *rt = NULL; - struct flowi4 fl4; - - if (!sock4) - return ERR_PTR(-EIO); - - if (tos && !info) - use_cache = false; - if (use_cache) { - rt = dst_cache_get_ip4(dst_cache, saddr); - if (rt) - return rt; - } - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = oif; - fl4.flowi4_tos = RT_TOS(tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = daddr; - fl4.saddr = *saddr; - fl4.fl4_dport = dport; - fl4.fl4_sport = sport; - fl4.flowi4_flags = flow_flags; - - rt = ip_route_output_key(vxlan->net, &fl4); - if (!IS_ERR(rt)) { - if (rt->dst.dev == dev) { - netdev_dbg(dev, "circular route to %pI4\n", &daddr); - ip_rt_put(rt); - return ERR_PTR(-ELOOP); - } - - *saddr = fl4.saddr; - if (use_cache) - dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); - } else { - netdev_dbg(dev, "no route to %pI4\n", &daddr); - return ERR_PTR(-ENETUNREACH); - } - return rt; -} - -#if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, - struct net_device *dev, - struct vxlan_sock *sock6, - struct sk_buff *skb, int oif, u8 tos, - __be32 label, - const struct in6_addr *daddr, - struct in6_addr *saddr, - __be16 dport, __be16 sport, - struct dst_cache *dst_cache, - const struct ip_tunnel_info *info) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct dst_entry *ndst; - struct flowi6 fl6; - - if (!sock6) - return ERR_PTR(-EIO); - - if (tos && !info) - use_cache = false; - if (use_cache) { - ndst = dst_cache_get_ip6(dst_cache, saddr); - if (ndst) - return ndst; - } - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = oif; - fl6.daddr = *daddr; - fl6.saddr = *saddr; - fl6.flowlabel = ip6_make_flowinfo(tos, label); - fl6.flowi6_mark = skb->mark; - fl6.flowi6_proto = IPPROTO_UDP; - fl6.fl6_dport = dport; - fl6.fl6_sport = sport; - - ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, - &fl6, NULL); - if (IS_ERR(ndst)) { - netdev_dbg(dev, "no route to %pI6\n", daddr); - return ERR_PTR(-ENETUNREACH); - } - - if (unlikely(ndst->dev == dev)) { - netdev_dbg(dev, "circular route to %pI6\n", daddr); - dst_release(ndst); - return ERR_PTR(-ELOOP); - } - - *saddr = fl6.saddr; - if (use_cache) - dst_cache_set_ip6(dst_cache, ndst, saddr); - return ndst; -} -#endif - /* Bypass encapsulation if the destination is local */ static 
void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan, __be32 vni, @@ -2376,7 +2268,7 @@ drop: static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan, - union vxlan_addr *daddr, + int addr_family, __be16 dst_port, int dst_ifindex, __be32 vni, struct dst_entry *dst, u32 rt_flags) @@ -2396,7 +2288,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, dst_release(dst); dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, - daddr->sa.sa_family, dst_port, + addr_family, dst_port, vxlan->cfg.flags); if (!dst_vxlan) { dev->stats.tx_errors++; @@ -2418,31 +2310,33 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, { struct dst_cache *dst_cache; struct ip_tunnel_info *info; + struct ip_tunnel_key *pkey; + struct ip_tunnel_key key; struct vxlan_dev *vxlan = netdev_priv(dev); const struct iphdr *old_iph = ip_hdr(skb); - union vxlan_addr *dst; - union vxlan_addr remote_ip, local_ip; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; unsigned int pkt_len = skb->len; __be16 src_port = 0, dst_port; struct dst_entry *ndst = NULL; - __u8 tos, ttl, flow_flags = 0; + int addr_family; + __u8 tos, ttl; int ifindex; int err; u32 flags = vxlan->cfg.flags; + bool use_cache; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); __be32 vni = 0; -#if IS_ENABLED(CONFIG_IPV6) - __be32 label; -#endif info = skb_tunnel_info(skb); + use_cache = ip_tunnel_dst_cache_usable(skb, info); if (rdst) { - dst = &rdst->remote_ip; - if (vxlan_addr_any(dst)) { + memset(&key, 0, sizeof(key)); + pkey = &key; + + if (vxlan_addr_any(&rdst->remote_ip)) { if (did_rsc) { /* short-circuited back to local bridge */ vxlan_encap_bypass(skb, vxlan, vxlan, @@ -2452,30 +2346,40 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } + addr_family = vxlan->cfg.saddr.sa.sa_family; dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = (rdst->remote_vni) ? 
: default_vni; ifindex = rdst->remote_ifindex; - local_ip = vxlan->cfg.saddr; + + if (addr_family == AF_INET) { + key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr; + key.u.ipv4.dst = rdst->remote_ip.sin.sin_addr.s_addr; + } else { + key.u.ipv6.src = vxlan->cfg.saddr.sin6.sin6_addr; + key.u.ipv6.dst = rdst->remote_ip.sin6.sin6_addr; + } + dst_cache = &rdst->dst_cache; md->gbp = skb->mark; if (flags & VXLAN_F_TTL_INHERIT) { ttl = ip_tunnel_get_ttl(old_iph, skb); } else { ttl = vxlan->cfg.ttl; - if (!ttl && vxlan_addr_multicast(dst)) + if (!ttl && vxlan_addr_multicast(&rdst->remote_ip)) ttl = 1; } - tos = vxlan->cfg.tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); + if (tos && !info) + use_cache = false; - if (dst->sa.sa_family == AF_INET) + if (addr_family == AF_INET) udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); else udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); #if IS_ENABLED(CONFIG_IPV6) - label = vxlan->cfg.label; + key.label = vxlan->cfg.label; #endif } else { if (!info) { @@ -2483,17 +2387,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dev->name); goto drop; } - remote_ip.sa.sa_family = ip_tunnel_info_af(info); - if (remote_ip.sa.sa_family == AF_INET) { - remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; - local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; - } else { - remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; - local_ip.sin6.sin6_addr = info->key.u.ipv6.src; - } - dst = &remote_ip; + pkey = &info->key; + addr_family = ip_tunnel_info_af(info); dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; - flow_flags = info->key.flow_flags; vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; @@ -2504,28 +2400,24 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = info->key.ttl; tos = info->key.tos; -#if IS_ENABLED(CONFIG_IPV6) - label = info->key.label; -#endif udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); } src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); rcu_read_lock(); - if (dst->sa.sa_family == AF_INET) { + if (addr_family == AF_INET) { struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; __be16 df = 0; + __be32 saddr; if (!ifindex) ifindex = sock4->sock->sk->sk_bound_dev_if; - rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, - dst->sin.sin_addr.s_addr, - &local_ip.sin.sin_addr.s_addr, - dst_port, src_port, flow_flags, - dst_cache, info); + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, ifindex, + &saddr, pkey, src_port, dst_port, + tos, use_cache ? 
dst_cache : NULL); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto tx_error; @@ -2533,7 +2425,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (!info) { /* Bypass encapsulation if the destination is local */ - err = encap_bypass_if_local(skb, dev, vxlan, dst, + err = encap_bypass_if_local(skb, dev, vxlan, AF_INET, dst_port, ifindex, vni, &rt->dst, rt->rt_flags); if (err) @@ -2561,16 +2453,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else if (err) { if (info) { struct ip_tunnel_info *unclone; - struct in_addr src, dst; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) goto tx_error; - src = remote_ip.sin.sin_addr; - dst = local_ip.sin.sin_addr; - unclone->key.u.ipv4.src = src.s_addr; - unclone->key.u.ipv4.dst = dst.s_addr; + unclone->key.u.ipv4.src = pkey->u.ipv4.dst; + unclone->key.u.ipv4.dst = saddr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); dst_release(ndst); @@ -2584,21 +2473,21 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto tx_error; - udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr, - dst->sin.sin_addr.s_addr, tos, ttl, df, + udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, + pkey->u.ipv4.dst, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); + struct in6_addr saddr; if (!ifindex) ifindex = sock6->sock->sk->sk_bound_dev_if; - ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, - label, &dst->sin6.sin6_addr, - &local_ip.sin6.sin6_addr, - dst_port, src_port, - dst_cache, info); + ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, + ifindex, &saddr, pkey, + src_port, dst_port, tos, + use_cache ? 
dst_cache : NULL); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); ndst = NULL; @@ -2608,7 +2497,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (!info) { u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - err = encap_bypass_if_local(skb, dev, vxlan, dst, + err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, dst_port, ifindex, vni, ndst, rt6i_flags); if (err) @@ -2623,16 +2512,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else if (err) { if (info) { struct ip_tunnel_info *unclone; - struct in6_addr src, dst; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) goto tx_error; - src = remote_ip.sin6.sin6_addr; - dst = local_ip.sin6.sin6_addr; - unclone->key.u.ipv6.src = src; - unclone->key.u.ipv6.dst = dst; + unclone->key.u.ipv6.src = pkey->u.ipv6.dst; + unclone->key.u.ipv6.dst = saddr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); @@ -2649,9 +2535,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, - &local_ip.sin6.sin6_addr, - &dst->sin6.sin6_addr, tos, ttl, - label, src_port, dst_port, !udp_sum); + &saddr, &pkey->u.ipv6.dst, tos, ttl, + pkey->label, src_port, dst_port, !udp_sum); #endif } vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); @@ -3022,9 +2907,101 @@ static int vxlan_open(struct net_device *dev) return ret; } +struct vxlan_fdb_flush_desc { + bool ignore_default_entry; + unsigned long state; + unsigned long state_mask; + unsigned long flags; + unsigned long flags_mask; + __be32 src_vni; + u32 nhid; + __be32 vni; + __be16 port; + union vxlan_addr dst_ip; +}; + +static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f, + const struct vxlan_dev *vxlan) +{ + return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni; +} + +static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid) +{ + struct nexthop *nh = rtnl_dereference(f->nh); + + return nh && nh->id == nhid; +} + +static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f, + const struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc) +{ + if (desc->state_mask && (f->state & desc->state_mask) != desc->state) + return false; + + if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags) + return false; + + if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan)) + return false; + + if (desc->src_vni && f->vni != desc->src_vni) + return false; + + if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid)) + return false; + + return true; +} + +static bool +vxlan_fdb_flush_should_match_remotes(const struct vxlan_fdb_flush_desc *desc) +{ + return desc->vni || desc->port || desc->dst_ip.sa.sa_family; +} + +static bool +vxlan_fdb_flush_remote_matches(const struct vxlan_fdb_flush_desc *desc, + const struct vxlan_rdst *rd) +{ + if (desc->vni && rd->remote_vni != desc->vni) + return false; + + if (desc->port && rd->remote_port != desc->port) + return false; + + if (desc->dst_ip.sa.sa_family && + !vxlan_addr_equal(&rd->remote_ip, &desc->dst_ip)) + return false; + + return true; +} + +static void +vxlan_fdb_flush_match_remotes(struct vxlan_fdb *f, struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc, + bool *p_destroy_fdb) +{ + bool remotes_flushed = false; + struct vxlan_rdst *rd, *tmp; + + list_for_each_entry_safe(rd, tmp, &f->remotes, list) { + if (!vxlan_fdb_flush_remote_matches(desc, rd)) + continue; + + vxlan_fdb_dst_destroy(vxlan, f, rd, true); + remotes_flushed = true; + 
} + + *p_destroy_fdb = remotes_flushed && list_empty(&f->remotes); +} + /* Purge the forwarding table */ -static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) +static void vxlan_flush(struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc) { + bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc); unsigned int h; for (h = 0; h < FDB_HASH_SIZE; ++h) { @@ -3034,28 +3011,122 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); - if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) - continue; - /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (is_zero_ether_addr(f->eth_addr) && - f->vni == vxlan->cfg.vni) + + if (!vxlan_fdb_flush_matches(f, vxlan, desc)) continue; + + if (match_remotes) { + bool destroy_fdb = false; + + vxlan_fdb_flush_match_remotes(f, vxlan, desc, + &destroy_fdb); + + if (!destroy_fdb) + continue; + } + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } } +static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = { + [NDA_SRC_VNI] = { .type = NLA_U32 }, + [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_VNI] = { .type = NLA_U32 }, + [NDA_PORT] = { .type = NLA_U16 }, + [NDA_DST] = NLA_POLICY_RANGE(NLA_BINARY, sizeof(struct in_addr), + sizeof(struct in6_addr)), + [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, + [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, +}; + +#define VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS (NTF_MASTER | NTF_SELF) +#define VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES (NUD_PERMANENT | NUD_NOARP) +#define VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS (NTF_EXT_LEARNED | NTF_OFFLOADED | \ + NTF_ROUTER) + +static int vxlan_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = {}; + struct ndmsg *ndm = nlmsg_data(nlh); + struct nlattr *tb[NDA_MAX + 1]; + u8 ndm_flags; + int err; + + ndm_flags = ndm->ndm_flags & ~VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS; + + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, vxlan_del_bulk_policy, + extack); + if (err) + return err; + + if (ndm_flags & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS) { + NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set"); + return -EINVAL; + } + if (ndm->ndm_state & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES) { + NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set"); + return -EINVAL; + } + + desc.state = ndm->ndm_state; + desc.flags = ndm_flags; + + if (tb[NDA_NDM_STATE_MASK]) + desc.state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]); + + if (tb[NDA_NDM_FLAGS_MASK]) + desc.flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]); + + if (tb[NDA_SRC_VNI]) + desc.src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); + + if (tb[NDA_NH_ID]) + desc.nhid = nla_get_u32(tb[NDA_NH_ID]); + + if (tb[NDA_VNI]) + desc.vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); + + if (tb[NDA_PORT]) + desc.port = nla_get_be16(tb[NDA_PORT]); + + if (tb[NDA_DST]) { + union vxlan_addr ip; + + err = vxlan_nla_get_addr(&ip, tb[NDA_DST]); + if (err) { + NL_SET_ERR_MSG_ATTR(extack, tb[NDA_DST], + "Unsupported address family"); + return err; + } + desc.dst_ip = ip; + } + + vxlan_flush(vxlan, &desc); + + return 0; +} + /* Cleanup timer and forwarding table on shutdown */ static int vxlan_stop(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = { + /* Default entry is deleted at vxlan_uninit. 
*/ + .ignore_default_entry = true, + .state = 0, + .state_mask = NUD_PERMANENT | NUD_NOARP, + }; vxlan_multicast_leave(vxlan); del_timer_sync(&vxlan->age_timer); - vxlan_flush(vxlan, false); + vxlan_flush(vxlan, &desc); vxlan_sock_release(vxlan); return 0; @@ -3100,11 +3171,14 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; - rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, - info->key.u.ipv4.dst, - &info->key.u.ipv4.src, dport, sport, - info->key.flow_flags, &info->dst_cache, - info); + if (!sock4) + return -EIO; + + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, 0, + &info->key.u.ipv4.src, + &info->key, + sport, dport, info->key.tos, + &info->dst_cache); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -3113,10 +3187,14 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct dst_entry *ndst; - ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, - info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, dport, sport, - &info->dst_cache, info); + if (!sock6) + return -EIO; + + ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, + 0, &info->key.u.ipv6.src, + &info->key, + sport, dport, info->key.tos, + &info->dst_cache); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); @@ -3142,11 +3220,13 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, + .ndo_fdb_del_bulk = vxlan_fdb_delete_bulk, .ndo_fdb_dump = vxlan_fdb_dump, .ndo_fdb_get = vxlan_fdb_get, .ndo_mdb_add = vxlan_mdb_add, .ndo_mdb_del = vxlan_mdb_del, .ndo_mdb_dump = vxlan_mdb_dump, + .ndo_mdb_get = vxlan_mdb_get, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; @@ -4294,8 +4374,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], static void vxlan_dellink(struct net_device *dev, struct list_head *head) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = { + /* Default entry is deleted at vxlan_uninit. 
*/ + .ignore_default_entry = true, + }; - vxlan_flush(vxlan, true); + vxlan_flush(vxlan, &desc); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); @@ -4305,7 +4389,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) static size_t vxlan_get_size(const struct net_device *dev) { - return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ @@ -4323,7 +4406,6 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ - nla_total_size(sizeof(struct ifla_vxlan_port_range)) + nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ @@ -4331,6 +4413,8 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */ + /* IFLA_VXLAN_PORT_RANGE */ + nla_total_size(sizeof(struct ifla_vxlan_port_range)) + nla_total_size(0) + /* IFLA_VXLAN_GBP */ nla_total_size(0) + /* IFLA_VXLAN_GPE */ nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */ diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c index 5e04162226..eb4c580b5c 100644 --- a/drivers/net/vxlan/vxlan_mdb.c +++ b/drivers/net/vxlan/vxlan_mdb.c @@ -311,7 +311,7 @@ vxlan_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = { [MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_entry_pol), }; -static struct netlink_range_validation vni_range = { +static const struct netlink_range_validation vni_range = { .max = VXLAN_N_VID - 1, }; @@ -370,12 +370,10 @@ static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto, return true; } -static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg, - const struct br_mdb_entry *entry, - const struct nlattr *source_attr) +static void vxlan_mdb_group_set(struct vxlan_mdb_entry_key *group, + const struct br_mdb_entry *entry, + const struct nlattr *source_attr) { - struct vxlan_mdb_entry_key *group = &cfg->group; - switch (entry->addr.proto) { case htons(ETH_P_IP): group->dst.sa.sa_family = AF_INET; @@ -503,7 +501,7 @@ static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg, entry->addr.proto, extack)) return -EINVAL; - vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); + vxlan_mdb_group_set(&cfg->group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); /* rtnetlink code only validates that IPv4 group address is * multicast. 
@@ -927,23 +925,20 @@ vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group, return nlmsg_size; } -static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, - const struct vxlan_mdb_entry *mdb_entry, - const struct vxlan_mdb_remote *remote) +static size_t +vxlan_mdb_nlmsg_remote_size(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) { const struct vxlan_mdb_entry_key *group = &mdb_entry->key; struct vxlan_rdst *rd = rtnl_dereference(remote->rd); size_t nlmsg_size; - nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + - /* MDBA_MDB */ - nla_total_size(0) + - /* MDBA_MDB_ENTRY */ - nla_total_size(0) + /* MDBA_MDB_ENTRY_INFO */ - nla_total_size(sizeof(struct br_mdb_entry)) + + nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) + /* MDBA_MDB_EATTR_TIMER */ nla_total_size(sizeof(u32)); + /* MDBA_MDB_EATTR_SOURCE */ if (vxlan_mdb_is_sg(group)) nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst)); @@ -971,6 +966,19 @@ static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, return nlmsg_size; } +static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) +{ + return NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0) + + /* Remote entry */ + vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, remote); +} + static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan, struct sk_buff *skb, const struct vxlan_mdb_entry *mdb_entry, @@ -1298,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], return err; } +static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = { + [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), + [MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range), +}; + +static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[], + struct vxlan_mdb_entry_key *group, + struct netlink_ext_ack *extack) +{ + struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]); + struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1]; + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + + memset(group, 0, sizeof(*group)); + group->vni = vxlan->default_dst.remote_vni; + + if (!tb[MDBA_GET_ENTRY_ATTRS]) { + vxlan_mdb_group_set(group, entry, NULL); + return 0; + } + + err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX, + tb[MDBA_GET_ENTRY_ATTRS], + vxlan_mdbe_attrs_get_pol, extack); + if (err) + return err; + + if (mdbe_attrs[MDBE_ATTR_SOURCE] && + !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE], + entry->addr.proto, extack)) + return -EINVAL; + + vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); + + if (mdbe_attrs[MDBE_ATTR_SRC_VNI]) + group->vni = + cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI])); + + return 0; +} + +static struct sk_buff * +vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry) +{ + struct vxlan_mdb_remote *remote; + size_t nlmsg_size; + + nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0); + + list_for_each_entry(remote, &mdb_entry->remotes, list) + nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, + remote); + + return nlmsg_new(nlmsg_size, GFP_KERNEL); +} + +static int +vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan, + struct 
sk_buff *skb, + const struct vxlan_mdb_entry *mdb_entry, + u32 portid, u32 seq) +{ + struct nlattr *mdb_nest, *mdb_entry_nest; + struct vxlan_mdb_remote *remote; + struct br_port_msg *bpm; + struct nlmsghdr *nlh; + int err; + + nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0); + if (!nlh) + return -EMSGSIZE; + + bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); + bpm->family = AF_BRIDGE; + bpm->ifindex = vxlan->dev->ifindex; + mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB); + if (!mdb_nest) { + err = -EMSGSIZE; + goto cancel; + } + mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY); + if (!mdb_entry_nest) { + err = -EMSGSIZE; + goto cancel; + } + + list_for_each_entry(remote, &mdb_entry->remotes, list) { + err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote); + if (err) + goto cancel; + } + + nla_nest_end(skb, mdb_entry_nest); + nla_nest_end(skb, mdb_nest); + nlmsg_end(skb, nlh); + + return 0; + +cancel: + nlmsg_cancel(skb, nlh); + return err; +} + +int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, + u32 seq, struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_mdb_entry *mdb_entry; + struct vxlan_mdb_entry_key group; + struct sk_buff *skb; + int err; + + ASSERT_RTNL(); + + err = vxlan_mdb_get_parse(dev, tb, &group, extack); + if (err) + return err; + + mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group); + if (!mdb_entry) { + NL_SET_ERR_MSG_MOD(extack, "MDB entry not found"); + return -ENOENT; + } + + skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry); + if (!skb) + return -ENOMEM; + + err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply"); + goto free; + } + + return rtnl_unicast(skb, dev_net(dev), portid); + +free: + kfree_skb(skb); + return err; +} + struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan, struct sk_buff *skb, __be32 src_vni) diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h index 817fa30758..db679c3809 100644 --- a/drivers/net/vxlan/vxlan_private.h +++ b/drivers/net/vxlan/vxlan_private.h @@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags, struct netlink_ext_ack *extack); int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack); +int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, + u32 seq, struct netlink_ext_ack *extack); struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan, struct sk_buff *skb, __be32 src_vni); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index e46b7f5ee4..b09f4c2351 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -687,10 +687,10 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget) napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && - napi_reschedule(napi)) { + napi_schedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" - " napi_reschedule succeeded\n", + " napi_schedule succeeded\n", dev->name); #endif qmgr_disable_irq(rxq); diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c index 4956f0499c..f89581b5e8 100644 --- a/drivers/net/wireguard/cookie.c +++ b/drivers/net/wireguard/cookie.c @@ -12,9 +12,9 @@ #include #include +#include #include -#include void wg_cookie_checker_init(struct cookie_checker *checker, struct wg_device *wg) diff --git 
a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index dc09b75a32..e220d761b1 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include static struct genl_family genl_family; diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 720952b92e..202a33af5a 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include /* This implements Noise_IKpsk2: * diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 19f61225a7..43e0db78d4 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -256,7 +256,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, /* always bulk-out a multiple of 4 bytes */ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3; - hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx; + hdr = cmd->buf_tx; memset(hdr, 0, sizeof(struct ar5523_cmd_hdr)); hdr->len = cpu_to_be32(xferlen); hdr->code = cpu_to_be32(code); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 666ce384a1..27367bd64e 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -110,7 +110,7 @@ struct ath10k_ce_ring { struct ce_desc_64 *shadow_base; /* keep last */ - void *per_transfer_context[]; + void *per_transfer_context[] __counted_by(nentries); }; struct ath10k_ce_pipe { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index fe89bc61e5..ad9cf953a2 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1964,20 +1964,13 @@ static ssize_t ath10k_write_btcoex(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - size_t buf_size; - int ret; + ssize_t ret; bool val; u32 pdev_param; - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, ubuf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - - if (kstrtobool(buf, &val) != 0) - return -EINVAL; + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; if (!ar->coex_support) return -EOPNOTSUPP; @@ -2000,7 +1993,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); if (ret) { - ath10k_warn(ar, "failed to enable btcoex: %d\n", ret); + ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret); ret = count; goto exit; } @@ -2103,19 +2096,12 @@ static ssize_t ath10k_write_peer_stats(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - size_t buf_size; - int ret; + ssize_t ret; bool val; - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, ubuf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - - if (kstrtobool(buf, &val) != 0) - return -EINVAL; + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; mutex_lock(&ar->conf_mutex); @@ -2239,21 +2225,16 @@ static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - ssize_t len; + ssize_t ret; u32 mask; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoint(buf, 0, &mask)) - return 
-EINVAL; + ret = kstrtoint_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; ar->sta_tid_stats_mask = mask; - return len; + return count; } static const struct file_operations fops_sta_tid_stats_mask = { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 7b24297146..c80470e888 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -880,8 +880,7 @@ enum htt_data_tx_status { HTT_DATA_TX_STATUS_OK = 0, HTT_DATA_TX_STATUS_DISCARD = 1, HTT_DATA_TX_STATUS_NO_ACK = 2, - HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */ - HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128 + HTT_DATA_TX_STATUS_POSTPONE = 3 /* HL only */ }; enum htt_data_tx_flags { diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 438b0caace..b261d6371c 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2964,7 +2964,6 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, break; case HTT_DATA_TX_STATUS_DISCARD: case HTT_DATA_TX_STATUS_POSTPONE: - case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; default: diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index bd603feb79..be4d4536aa 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -796,20 +796,16 @@ static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt) return 0; } -static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring) +static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring32 *rx_ring) { - struct htt_rx_ring_setup_ring32 *ring = - (struct htt_rx_ring_setup_ring32 *)rx_ring; - - ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets); + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } -static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring) +static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring64 *rx_ring) { - struct htt_rx_ring_setup_ring64 *ring = - (struct htt_rx_ring_setup_ring64 *)rx_ring; - - ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets); + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 03e7bc5b6c..2cf693f3fe 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -728,20 +728,13 @@ static int ath10k_peer_create(struct ath10k *ar, const u8 *addr, enum wmi_peer_type peer_type) { - struct ath10k_vif *arvif; struct ath10k_peer *peer; - int num_peers = 0; int ret; lockdep_assert_held(&ar->conf_mutex); - num_peers = ar->num_peers; - - /* Each vdev consumes a peer entry as well */ - list_for_each_entry(arvif, &ar->arvifs, list) - num_peers++; - - if (num_peers >= ar->max_num_peers) + /* Each vdev consumes a peer entry as well. 
*/ + if (ar->num_peers + list_count_nodes(&ar->arvifs) >= ar->max_num_peers) return -ENOBUFS; ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); @@ -4503,18 +4496,21 @@ void __ath10k_scan_finish(struct ath10k *ar) break; case ATH10K_SCAN_RUNNING: case ATH10K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH10K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH10K_SCAN_ABORTING), + .aborted = ((ar->scan.state == + ATH10K_SCAN_ABORTING) || + (ar->scan.state == + ATH10K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); } - fallthrough; - case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 23f3662219..2f8c785277 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3148,7 +3148,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * immediate servicing. */ if (ath10k_ce_interrupt_summary(ar)) { - napi_reschedule(ctx); + napi_schedule(ctx); goto out; } ath10k_pci_enable_legacy_irq(ar); diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 68254a967c..2240994390 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -384,16 +384,11 @@ static ssize_t write_file_spectral_count(struct file *file, { struct ath10k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ -440,16 +435,11 @@ static ssize_t write_file_spectral_bins(struct file *file, { struct ath10k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index cc47e01145..2c94d50ae3 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -17,7 +17,8 @@ ath11k-y += core.o \ peer.o \ dbring.o \ hw.o \ - pcic.o + pcic.o \ + fw.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index ef11c138bf..f8f5e653cd 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -1084,19 +1085,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; - const struct of_device_id *of_id; const struct ath11k_hif_ops *hif_ops; const 
struct ath11k_pci_ops *pci_ops; enum ath11k_hw_rev hw_rev; int ret; - of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev); - if (!of_id) { - dev_err(&pdev->dev, "failed to find matching device tree id\n"); - return -EINVAL; - } - - hw_rev = (uintptr_t)of_id->data; + hw_rev = (uintptr_t)device_get_match_data(&pdev->dev); switch (hw_rev) { case ATH11K_HW_IPQ8074: diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index fc7c4564a7..0c6ecbb9a0 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -16,6 +16,7 @@ #include "debug.h" #include "hif.h" #include "wow.h" +#include "fw.h" unsigned int ath11k_debug_mask; EXPORT_SYMBOL(ath11k_debug_mask); @@ -985,9 +986,15 @@ int ath11k_core_check_dt(struct ath11k_base *ab) return 0; } +enum ath11k_bdf_name_type { + ATH11K_BDF_NAME_FULL, + ATH11K_BDF_NAME_BUS_NAME, + ATH11K_BDF_NAME_CHIP_ID, +}; + static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len, bool with_variant, - bool bus_type_mode) + enum ath11k_bdf_name_type name_type) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; @@ -998,11 +1005,8 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, switch (ab->id.bdf_search) { case ATH11K_BDF_SEARCH_BUS_AND_BOARD: - if (bus_type_mode) - scnprintf(name, name_len, - "bus=%s", - ath11k_bus_str(ab->hif.bus)); - else + switch (name_type) { + case ATH11K_BDF_NAME_FULL: scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), @@ -1012,6 +1016,19 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); + break; + case ATH11K_BDF_NAME_BUS_NAME: + scnprintf(name, name_len, + "bus=%s", + ath11k_bus_str(ab->hif.bus)); + break; + case ATH11K_BDF_NAME_CHIP_ID: + scnprintf(name, name_len, + "bus=%s,qmi-chip-id=%d", + ath11k_bus_str(ab->hif.bus), + ab->qmi.target.chip_id); + break; + } break; default: scnprintf(name, name_len, @@ -1030,19 +1047,29 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, true, false); + return __ath11k_core_create_board_name(ab, name, name_len, true, + ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, false, false); + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, false, true); + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_BUS_NAME); +} + +static int ath11k_core_create_chip_id_board_name(struct ath11k_base *ab, char *name, + size_t name_len) +{ + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_CHIP_ID); } const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, @@ -1289,31 +1316,43 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, #define BOARD_NAME_SIZE 200 int 
ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { - char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE]; + char *boardname = NULL, *fallback_boardname = NULL, *chip_id_boardname = NULL; char *filename, filepath[100]; - int ret; + int bd_api; + int ret = 0; filename = ATH11K_BOARD_API2_FILE; + boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!boardname) { + ret = -ENOMEM; + goto exit; + } - ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname)); + ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create board name: %d", ret); - return ret; + goto exit; } - ab->bd_api = 2; + bd_api = 2; ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) - goto success; + goto exit; + + fallback_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!fallback_boardname) { + ret = -ENOMEM; + goto exit; + } ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname, - sizeof(fallback_boardname)); + BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create fallback board name: %d", ret); - return ret; + goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, @@ -1321,9 +1360,30 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) - goto success; + goto exit; + + chip_id_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!chip_id_boardname) { + ret = -ENOMEM; + goto exit; + } - ab->bd_api = 1; + ret = ath11k_core_create_chip_id_board_name(ab, chip_id_boardname, + BOARD_NAME_SIZE); + if (ret) { + ath11k_err(ab, "failed to create chip id board name: %d", ret); + goto exit; + } + + ret = ath11k_core_fetch_board_data_api_n(ab, bd, chip_id_boardname, + ATH11K_BD_IE_BOARD, + ATH11K_BD_IE_BOARD_NAME, + ATH11K_BD_IE_BOARD_DATA); + + if (!ret) + goto exit; + + bd_api = 1; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { ath11k_core_create_firmware_path(ab, filename, @@ -1334,14 +1394,22 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) ath11k_err(ab, "failed to fetch board data for %s from %s\n", fallback_boardname, filepath); + ath11k_err(ab, "failed to fetch board data for %s from %s\n", + chip_id_boardname, filepath); + ath11k_err(ab, "failed to fetch board.bin from %s\n", ab->hw_params.fw.dir); - return ret; } -success: - ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api); - return 0; +exit: + kfree(boardname); + kfree(fallback_boardname); + kfree(chip_id_boardname); + + if (!ret) + ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", bd_api); + + return ret; } int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) @@ -2005,6 +2073,12 @@ int ath11k_core_pre_init(struct ath11k_base *ab) return ret; } + ret = ath11k_fw_pre_init(ab); + if (ret) { + ath11k_err(ab, "failed to pre init firmware: %d", ret); + return ret; + } + return 0; } EXPORT_SYMBOL(ath11k_core_pre_init); @@ -2035,6 +2109,7 @@ void ath11k_core_deinit(struct ath11k_base *ab) ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); + ath11k_fw_destroy(ab); } EXPORT_SYMBOL(ath11k_core_deinit); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index b044477624..667d55e261 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ 
b/drivers/net/wireless/ath/ath11k/core.h @@ -15,6 +15,8 @@ #include #include #include +#include + #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -29,6 +31,7 @@ #include "dbring.h" #include "spectral.h" #include "wow.h" +#include "fw.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -365,10 +368,6 @@ struct ath11k_vif { struct ieee80211_chanctx_conf chanctx; struct ath11k_arp_ns_offload arp_ns_offload; struct ath11k_rekey_data rekey_data; - -#ifdef CONFIG_ATH11K_DEBUGFS - struct dentry *debugfs_twt; -#endif /* CONFIG_ATH11K_DEBUGFS */ }; struct ath11k_vif_iter { @@ -901,14 +900,11 @@ struct ath11k_base { struct list_head peers; wait_queue_head_t peer_mapping_wq; u8 mac_addr[ETH_ALEN]; - bool wmi_ready; - u32 wlan_init_status; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; - int bd_api; struct ath11k_hw_params hw_params; @@ -984,6 +980,18 @@ struct ath11k_base { const struct ath11k_pci_ops *ops; } pci; + struct { + u32 api_version; + + const struct firmware *fw; + const u8 *amss_data; + size_t amss_len; + const u8 *m3_data; + size_t m3_len; + + DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT); + } fw; + #ifdef CONFIG_NL80211_TESTMODE struct { u32 data_pos; @@ -1225,6 +1233,11 @@ static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif) return (struct ath11k_vif *)vif->drv_priv; } +static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta) +{ + return (struct ath11k_sta *)sta->drv_priv; +} + static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab, int mac_id) { diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 5bb6fd17fd..0796f4d92b 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -1459,7 +1459,7 @@ static void ath11k_reset_peer_ps_duration(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->ps_total_duration = 0; @@ -1510,7 +1510,7 @@ static void ath11k_peer_ps_state_disable(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; @@ -1591,10 +1591,10 @@ static const struct file_operations fops_ps_state_enable = { int ath11k_debugfs_register(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; - char pdev_name[5]; + char pdev_name[10]; char buf[100] = {0}; - snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); + snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx); ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); if (IS_ERR(ar->debug.debugfs_pdev)) @@ -1893,35 +1893,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = { .open = simple_open }; -void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) +void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_base *ab = arvif->ar->ab; + struct dentry *debugfs_twt; if (arvif->vif->type != NL80211_IFTYPE_AP && !(arvif->vif->type == 
NL80211_IFTYPE_STATION && test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map))) return; - arvif->debugfs_twt = debugfs_create_dir("twt", - arvif->vif->debugfs_dir); - debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt, + debugfs_twt = debugfs_create_dir("twt", + arvif->vif->debugfs_dir); + debugfs_create_file("add_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_add_dialog); - debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt, + debugfs_create_file("del_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_del_dialog); - debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt, + debugfs_create_file("pause_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_pause_dialog); - debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt, + debugfs_create_file("resume_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_resume_dialog); } -void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) -{ - if (!arvif->debugfs_twt) - return; - - debugfs_remove_recursive(arvif->debugfs_twt); - arvif->debugfs_twt = NULL; -} diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h index 3af0169f6c..6f630b42e9 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.h +++ b/drivers/net/wireless/ath/ath11k/debugfs.h @@ -306,8 +306,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar) return ar->debug.rx_filter; } -void ath11k_debugfs_add_interface(struct ath11k_vif *arvif); -void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif); +void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, enum wmi_direct_buffer_module id, enum ath11k_dbg_dbr_event event, @@ -386,14 +386,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar, return 0; } -static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) -{ -} - -static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) -{ -} - static inline void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, enum wmi_direct_buffer_module id, diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c index 9cc4ef28e7..8c177fba6f 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c @@ -136,7 +136,7 @@ static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_htt_data_stats *stats; static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail", @@ -243,7 +243,7 @@ static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; int len = 0, i, retval = 0; @@ -340,7 +340,7 @@ static int ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct debug_htt_stats_req *stats_req; int type = 
ar->debug.htt_stats.type; @@ -376,7 +376,7 @@ static int ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; mutex_lock(&ar->conf_mutex); @@ -413,7 +413,7 @@ static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; int ret, enable; @@ -453,7 +453,7 @@ static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[32] = {0}; int len; @@ -480,7 +480,7 @@ static ssize_t ath11k_dbg_sta_write_delba(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, initiator, reason; int ret; @@ -531,7 +531,7 @@ static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, status; int ret; @@ -581,7 +581,7 @@ static ssize_t ath11k_dbg_sta_write_addba(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, buf_size; int ret; @@ -632,7 +632,7 @@ static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[64]; int len = 0; @@ -652,7 +652,7 @@ static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 aggr_mode; int ret; @@ -697,7 +697,7 @@ ath11k_write_htt_peer_stats_reset(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct htt_ext_stats_cfg_params cfg_params = { 0 }; int ret; @@ -756,7 +756,7 @@ static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; int len; @@ -783,7 +783,7 @@ static ssize_t 
ath11k_dbg_sta_read_current_ps_duration(struct file *file, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u64 time_since_station_in_power_save; char buf[20]; @@ -817,7 +817,7 @@ static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; u64 power_save_duration; diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index d070bcb3fe..a7252b5255 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -1009,7 +1009,7 @@ void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif) static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx) { - struct ath11k_base *ab = (struct ath11k_base *)ctx; + struct ath11k_base *ab = ctx; struct sk_buff *msdu = skb; dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index a993e74bba..7eac93ce7a 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1099,7 +1099,7 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar, struct ieee80211_ampdu_params *params) { struct ath11k_base *ab = ar->ab; - struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret; @@ -1117,7 +1117,7 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; - struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; dma_addr_t paddr; bool active; @@ -1256,7 +1256,7 @@ static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, int cur_user; u16 peer_id; - ppdu_info = (struct htt_ppdu_stats_info *)data; + ppdu_info = data; switch (tag) { case HTT_PPDU_STATS_TAG_COMMON: @@ -1388,9 +1388,6 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, u8 tid = HTT_PPDU_STATS_NON_QOS_TID; bool is_ampdu = false; - if (!usr_stats) - return; - if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) return; @@ -1459,7 +1456,7 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, } sta = peer->sta; - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); @@ -4495,8 +4492,7 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); if (src_srng_desc) { - struct ath11k_buffer_addr *src_desc = - (struct ath11k_buffer_addr *)src_srng_desc; + struct ath11k_buffer_addr *src_desc = src_srng_desc; *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); } else { @@ -4515,8 +4511,7 @@ void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, u8 *rbm, void **pp_buf_addr_info) { - struct hal_rx_msdu_link *msdu_link = - (struct hal_rx_msdu_link *)rx_msdu_link_desc; + struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc; struct ath11k_buffer_addr *buf_addr_info; buf_addr_info = (struct 
ath11k_buffer_addr *)&msdu_link->buf_addr_info; @@ -4557,7 +4552,7 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); u8 tmp = 0; - msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; + msdu_link = msdu_link_desc; msdu_details = &msdu_link->msdu_link[0]; for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { @@ -4654,8 +4649,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, bool is_frag, is_first_msdu; bool drop_mpdu = false; struct ath11k_skb_rxcb *rxcb; - struct hal_reo_entrance_ring *ent_desc = - (struct hal_reo_entrance_ring *)ring_entry; + struct hal_reo_entrance_ring *ent_desc = ring_entry; int buf_id; u32 rx_link_buf_info[2]; u8 rbm; @@ -5103,13 +5097,6 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; - if (!mon_dst_srng) { - ath11k_warn(ar->ab, - "HAL Monitor Destination Ring Init Failed -- %p", - mon_dst_srng); - return; - } - spin_lock_bh(&pmon->mon_lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); @@ -5261,7 +5248,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, goto next_skb; } - arsta = (struct ath11k_sta *)peer->sta->drv_priv; + arsta = ath11k_sta_to_arsta(peer->sta); ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 0dda76f7a4..a5fa08bc62 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -467,7 +467,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) } sta = peer->sta; - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, @@ -627,7 +627,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, ieee80211_free_txskb(ar->hw, msdu); return; } - arsta = (struct ath11k_sta *)peer->sta->drv_priv; + arsta = ath11k_sta_to_arsta(peer->sta); status.sta = peer->sta; status.skb = msdu; status.info = info; diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c new file mode 100644 index 0000000000..8f84fba298 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/fw.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" + +#include "debug.h" + +static int ath11k_fw_request_firmware_api_n(struct ath11k_base *ab, + const char *name) +{ + size_t magic_len, len, ie_len; + int ie_id, i, index, bit, ret; + struct ath11k_fw_ie *hdr; + const u8 *data; + __le32 *timestamp; + + ab->fw.fw = ath11k_core_firmware_request(ab, name); + if (IS_ERR(ab->fw.fw)) { + ret = PTR_ERR(ab->fw.fw); + ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to load %s: %d\n", name, ret); + ab->fw.fw = NULL; + return ret; + } + + data = ab->fw.fw->data; + len = ab->fw.fw->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH11K_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath11k_err(ab, "firmware image too small to contain magic: %zu\n", + len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH11K_FIRMWARE_MAGIC, magic_len) != 0) { + ath11k_err(ab, "Invalid firmware magic\n"); + ret = -EINVAL; + goto err; + } + + /* jump over the padding */ + magic_len = ALIGN(magic_len, 4); + + /* make sure there's space for padding */ + if (magic_len > len) { + ath11k_err(ab, "No space for padding after magic\n"); + ret = -EINVAL; + goto err; + } + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath11k_fw_ie)) { + hdr = (struct ath11k_fw_ie *)data; + + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + if (len < ie_len) { + ath11k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n", + ie_id, len, ie_len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH11K_FW_IE_TIMESTAMP: + if (ie_len != sizeof(u32)) + break; + + timestamp = (__le32 *)data; + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "found fw timestamp %d\n", + le32_to_cpup(timestamp)); + break; + case ATH11K_FW_IE_FEATURES: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found firmware features ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH11K_FW_FEATURE_COUNT; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) + __set_bit(i, ab->fw.fw_features); + } + + ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "features", "", + ab->fw.fw_features, + sizeof(ab->fw.fw_features)); + break; + case ATH11K_FW_IE_AMSS_IMAGE: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found fw image ie (%zd B)\n", + ie_len); + + ab->fw.amss_data = data; + ab->fw.amss_len = ie_len; + break; + case ATH11K_FW_IE_M3_IMAGE: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found m3 image ie (%zd B)\n", + ie_len); + + ab->fw.m3_data = data; + ab->fw.m3_len = ie_len; + break; + default: + ath11k_warn(ab, "Unknown FW IE: %u\n", ie_id); + break; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + /* make sure there's space for padding */ + if (ie_len > len) + break; + + len -= ie_len; + data += ie_len; + }; + + return 0; + +err: + release_firmware(ab->fw.fw); + ab->fw.fw = NULL; + return ret; +} + +int ath11k_fw_pre_init(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_fw_request_firmware_api_n(ab, ATH11K_FW_API2_FILE); + if (ret == 0) { + ab->fw.api_version = 2; + goto out; + } + + ab->fw.api_version = 1; + +out: + ath11k_dbg(ab, ATH11K_DBG_BOOT, "using fw api %d\n", + ab->fw.api_version); + + return 0; +} + +void ath11k_fw_destroy(struct ath11k_base *ab) +{ + release_firmware(ab->fw.fw); +} diff --git a/drivers/net/wireless/ath/ath11k/fw.h b/drivers/net/wireless/ath/ath11k/fw.h new file mode 100644 index 0000000000..d9893ceb2c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/fw.h @@ -0,0 +1,27 @@ +/* 
SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH11K_FW_H +#define ATH11K_FW_H + +#define ATH11K_FW_API2_FILE "firmware-2.bin" +#define ATH11K_FIRMWARE_MAGIC "QCOM-ATH11K-FW" + +enum ath11k_fw_ie_type { + ATH11K_FW_IE_TIMESTAMP = 0, + ATH11K_FW_IE_FEATURES = 1, + ATH11K_FW_IE_AMSS_IMAGE = 2, + ATH11K_FW_IE_M3_IMAGE = 3, +}; + +enum ath11k_fw_features { + /* keep last */ + ATH11K_FW_FEATURE_COUNT, +}; + +int ath11k_fw_pre_init(struct ath11k_base *ab); +void ath11k_fw_destroy(struct ath11k_base *ab); + +#endif /* ATH11K_FW_H */ diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 0a99aa7ddb..23f3af8e37 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -571,7 +571,7 @@ u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type) void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data) { - struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf; + struct hal_ce_srng_src_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = @@ -586,8 +586,7 @@ void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) { - struct hal_ce_srng_dest_desc *desc = - (struct hal_ce_srng_dest_desc *)buf; + struct hal_ce_srng_dest_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = @@ -597,8 +596,7 @@ void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) u32 ath11k_hal_ce_dst_status_get_length(void *buf) { - struct hal_ce_srng_dst_status_desc *desc = - (struct hal_ce_srng_dst_status_desc *)buf; + struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index e5ed5efb13..41946795d6 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -265,7 +265,7 @@ out: void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, u32 cookie, u8 manager) { - struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + struct ath11k_buffer_addr *binfo = desc; u32 paddr_lo, paddr_hi; paddr_lo = lower_32_bits(paddr); @@ -279,7 +279,7 @@ void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr, u32 *cookie, u8 *rbm) { - struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + struct ath11k_buffer_addr *binfo = desc; *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) | @@ -292,7 +292,7 @@ void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus, u32 *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm) { - struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc; + struct hal_rx_msdu_link *link = link_desc; struct hal_rx_msdu_details *msdu; int i; @@ -699,7 +699,7 @@ u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid) void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, u32 start_seq, enum hal_pn_type type) { - struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr; + struct hal_rx_reo_queue *qdesc = vaddr; struct hal_rx_reo_queue_ext *ext_desc; memset(qdesc, 0, sizeof(*qdesc)); @@ -809,27 +809,25 @@ static inline void 
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6); - rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]); + rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->info10); } static inline void ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->mpdu_ok_byte_count = - FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT, - __le32_to_cpu(ppdu_end_user->rsvd2[6])); + FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT, + __le32_to_cpu(ppdu_end_user->info8)); rx_user_status->mpdu_err_byte_count = - FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT, - __le32_to_cpu(ppdu_end_user->rsvd2[8])); + FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT, + __le32_to_cpu(ppdu_end_user->info9)); } static inline void @@ -903,8 +901,8 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX, __le32_to_cpu(eu_stats->info2)); ppdu_info->tid = - ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP, - __le32_to_cpu(eu_stats->info6))) - 1; + ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP, + __le32_to_cpu(eu_stats->info7))) - 1; ppdu_info->tcp_msdu_count = FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT, __le32_to_cpu(eu_stats->info4)); @@ -1540,8 +1538,7 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, void **pp_buf_addr, u8 *rbm, u32 *msdu_cnt) { - struct hal_reo_entrance_ring *reo_ent_ring = - (struct hal_reo_entrance_ring *)rx_desc; + struct hal_reo_entrance_ring *reo_ent_ring = rx_desc; struct ath11k_buffer_addr *buf_addr_info; struct rx_mpdu_desc *rx_mpdu_desc_info_details; diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h index 61bd8416c4..472a52cf58 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.h +++ b/drivers/net/wireless/ath/ath11k/hal_rx.h @@ -149,7 +149,7 @@ struct hal_rx_mon_ppdu_info { u8 beamformed; u8 rssi_comb; u8 rssi_chain_pri20[HAL_RX_MAX_NSS]; - u8 tid; + u16 tid; u16 ht_flags; u16 vht_flags; u16 he_flags; @@ -219,11 +219,11 @@ struct hal_rx_ppdu_start { #define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0) -#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_EOSP_BITMAP GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0) -#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) struct hal_rx_ppdu_end_user_stats { __le32 rsvd0[2]; @@ -236,7 +236,13 @@ struct hal_rx_ppdu_end_user_stats { __le32 info4; __le32 info5; __le32 
info6; - __le32 rsvd2[11]; + __le32 info7; + __le32 rsvd2[4]; + __le32 info8; + __le32 rsvd3; + __le32 info9; + __le32 rsvd4[2]; + __le32 info10; } __packed; struct hal_rx_ppdu_end_user_stats_ext { diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c index d1b0e36e04..b919df6ce7 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.c +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c @@ -37,7 +37,7 @@ static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = { void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, struct hal_tx_info *ti) { - struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd; + struct hal_tcl_data_cmd *tcl_cmd = cmd; tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr); diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 659b80d2ab..d68ed4214d 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -9,18 +9,18 @@ #include "core.h" struct ath11k_hif_ops { - u32 (*read32)(struct ath11k_base *sc, u32 address); - void (*write32)(struct ath11k_base *sc, u32 address, u32 data); + u32 (*read32)(struct ath11k_base *ab, u32 address); + void (*write32)(struct ath11k_base *ab, u32 address, u32 data); int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end); - void (*irq_enable)(struct ath11k_base *sc); - void (*irq_disable)(struct ath11k_base *sc); - int (*start)(struct ath11k_base *sc); - void (*stop)(struct ath11k_base *sc); - int (*power_up)(struct ath11k_base *sc); - void (*power_down)(struct ath11k_base *sc); + void (*irq_enable)(struct ath11k_base *ab); + void (*irq_disable)(struct ath11k_base *ab); + int (*start)(struct ath11k_base *ab); + void (*stop)(struct ath11k_base *ab); + int (*power_up)(struct ath11k_base *ab); + void (*power_down)(struct ath11k_base *ab); int (*suspend)(struct ath11k_base *ab); int (*resume)(struct ath11k_base *ab); - int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id, + int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, @@ -44,34 +44,34 @@ static inline void ath11k_hif_ce_irq_disable(struct ath11k_base *ab) ab->hif.ops->ce_irq_disable(ab); } -static inline int ath11k_hif_start(struct ath11k_base *sc) +static inline int ath11k_hif_start(struct ath11k_base *ab) { - return sc->hif.ops->start(sc); + return ab->hif.ops->start(ab); } -static inline void ath11k_hif_stop(struct ath11k_base *sc) +static inline void ath11k_hif_stop(struct ath11k_base *ab) { - sc->hif.ops->stop(sc); + ab->hif.ops->stop(ab); } -static inline void ath11k_hif_irq_enable(struct ath11k_base *sc) +static inline void ath11k_hif_irq_enable(struct ath11k_base *ab) { - sc->hif.ops->irq_enable(sc); + ab->hif.ops->irq_enable(ab); } -static inline void ath11k_hif_irq_disable(struct ath11k_base *sc) +static inline void ath11k_hif_irq_disable(struct ath11k_base *ab) { - sc->hif.ops->irq_disable(sc); + ab->hif.ops->irq_disable(ab); } -static inline int ath11k_hif_power_up(struct ath11k_base *sc) +static inline int ath11k_hif_power_up(struct ath11k_base *ab) { - return sc->hif.ops->power_up(sc); + return ab->hif.ops->power_up(ab); } -static inline void ath11k_hif_power_down(struct ath11k_base *sc) +static inline void ath11k_hif_power_down(struct ath11k_base *ab) { - sc->hif.ops->power_down(sc); + ab->hif.ops->power_down(ab); } static inline int 
ath11k_hif_suspend(struct ath11k_base *ab) @@ -90,14 +90,14 @@ static inline int ath11k_hif_resume(struct ath11k_base *ab) return 0; } -static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address) +static inline u32 ath11k_hif_read32(struct ath11k_base *ab, u32 address) { - return sc->hif.ops->read32(sc, address); + return ab->hif.ops->read32(ab, address); } -static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 data) +static inline void ath11k_hif_write32(struct ath11k_base *ab, u32 address, u32 data) { - sc->hif.ops->write32(sc, address, data); + ab->hif.ops->write32(ab, address, data); } static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf, @@ -109,10 +109,10 @@ static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf, return ab->hif.ops->read(ab, buf, start, end); } -static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id, +static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { - return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe); + return ab->hif.ops->map_service_to_pipe(ab, service_id, ul_pipe, dl_pipe); } static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name, diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h index f429b37cfd..d31e501c80 100644 --- a/drivers/net/wireless/ath/ath11k/htc.h +++ b/drivers/net/wireless/ath/ath11k/htc.h @@ -156,18 +156,6 @@ struct ath11k_htc_record { }; } __packed __aligned(4); -/* note: the trailer offset is dynamic depending - * on payload length. this is only a struct layout draft - */ -struct ath11k_htc_frame { - struct ath11k_htc_hdr hdr; - union { - struct ath11k_htc_msg msg; - u8 payload[0]; - }; - struct ath11k_htc_record trailer[0]; -} __packed __aligned(4); - enum ath11k_htc_svc_gid { ATH11K_HTC_SVC_GRP_RSVD = 0, ATH11K_HTC_SVC_GRP_WMI = 1, diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index b328a05998..71c6dab1ae 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -2831,7 +2832,7 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, lockdep_assert_held(&ar->conf_mutex); - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(arg, 0, sizeof(*arg)); @@ -4314,7 +4315,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -4905,7 +4906,7 @@ static int ath11k_mac_station_add(struct ath11k *ar, { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct peer_create_params peer_param; int ret; @@ -5029,7 +5030,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k_peer *peer; int ret = 0; @@ -5195,7 +5196,7 @@ static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool 
enabled) { struct ath11k *ar = hw->priv; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); if (enabled && !arsta->use_4addr_set) { ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); @@ -5209,7 +5210,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, u32 changed) { struct ath11k *ar = hw->priv; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; u32 bw, smps; @@ -5893,8 +5894,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_2GHZ], NL80211_BAND_2GHZ); band = &ar->mac.sbands[NL80211_BAND_2GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_2GHZ], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { @@ -5902,8 +5904,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_5GHZ], NL80211_BAND_5GHZ); band = &ar->mac.sbands[NL80211_BAND_5GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_5GHZ], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && @@ -5912,8 +5915,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_6GHZ], NL80211_BAND_6GHZ); band = &ar->mac.sbands[NL80211_BAND_6GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_6GHZ], + count); } } @@ -6199,7 +6203,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, } if (control->sta) - arsta = (struct ath11k_sta *)control->sta->drv_priv; + arsta = ath11k_sta_to_arsta(control->sta); ret = ath11k_dp_tx(ar, arvif, arsta, skb); if (unlikely(ret)) { @@ -6746,13 +6750,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err; } - /* In the case of hardware recovery, debugfs files are - * not deleted since ieee80211_ops.remove_interface() is - * not invoked. In such cases, try to delete the files. - * These will be re-created later. 
- */ - ath11k_debugfs_remove_interface(arvif); - memset(arvif, 0, sizeof(*arvif)); arvif->ar = ar; @@ -6929,8 +6926,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, ath11k_dp_vdev_tx_attach(ar, arvif); - ath11k_debugfs_add_interface(arvif); - if (vif->type != NL80211_IFTYPE_MONITOR && test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_vdev_create(ar); @@ -6967,8 +6962,8 @@ err: static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx) { - struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx; - struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); + struct ieee80211_vif *vif = ctx; + struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); if (skb_cb->vif == vif) skb_cb->vif = NULL; @@ -7046,8 +7041,6 @@ err_vdev_del: /* Recalc txpower for remaining vdev */ ath11k_mac_txpower_recalc(ar); - ath11k_debugfs_remove_interface(arvif); - /* TODO: recal traffic pause state based on the available vdevs */ mutex_unlock(&ar->conf_mutex); @@ -7193,6 +7186,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, struct wmi_vdev_start_req_arg arg = {}; const struct cfg80211_chan_def *chandef = &ctx->def; int ret = 0; + unsigned int dfs_cac_time; lockdep_assert_held(&ar->conf_mutex); @@ -7272,20 +7266,21 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); - /* Enable CAC Flag in the driver by checking the channel DFS cac time, - * i.e dfs_cac_ms value which will be valid only for radar channels - * and state as NL80211_DFS_USABLE which indicates CAC needs to be + /* Enable CAC Flag in the driver by checking the all sub-channel's DFS + * state as NL80211_DFS_USABLE which indicates CAC needs to be * done before channel usage. This flags is used to drop rx packets. * during CAC. 
*/ /* TODO Set the flag for other interface types as required */ - if (arvif->vdev_type == WMI_VDEV_TYPE_AP && - chandef->chan->dfs_cac_ms && - chandef->chan->dfs_state == NL80211_DFS_USABLE) { + if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && + cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) { set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); + dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy, + chandef); ath11k_dbg(ab, ATH11K_DBG_MAC, - "CAC Started in chan_freq %d for vdev %d\n", - arg.channel.freq, arg.vdev_id); + "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", + dfs_cac_time, arg.channel.freq, chandef->center_freq1, + arg.vdev_id); } ret = ath11k_mac_set_txbf_conf(arvif); @@ -7910,12 +7905,14 @@ ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap) static bool ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, + struct ath11k_vif *arvif, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + const struct ieee80211_sta_he_cap *he_cap; u16 he_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; @@ -7946,7 +7943,11 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, return false; } - he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, arvif->vif); + if (!he_cap) + return false; + + he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap)); for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (mask->control[band].he_mcs[i] == 0) @@ -8223,7 +8224,7 @@ static void ath11k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); @@ -8362,7 +8363,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_disable_peer_fixed_rate, arvif); - } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask, + } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; nss = single_nss; @@ -8627,7 +8628,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; s8 signal; bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, @@ -8905,7 +8906,7 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct scan_req_params arg; + struct scan_req_params *arg; int ret; u32 scan_time_msec; @@ -8937,27 +8938,31 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; - memset(&arg, 0, sizeof(arg)); - ath11k_wmi_start_scan_init(ar, &arg); - arg.num_chan = 1; - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), - GFP_KERNEL); - if (!arg.chan_list) { + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { ret = -ENOMEM; goto exit; } + ath11k_wmi_start_scan_init(ar, arg); + arg->num_chan = 1; + arg->chan_list = 
kcalloc(arg->num_chan, sizeof(*arg->chan_list), + GFP_KERNEL); + if (!arg->chan_list) { + ret = -ENOMEM; + goto free_arg; + } - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH11K_SCAN_ID; - arg.chan_list[0] = chan->center_freq; - arg.dwell_time_active = scan_time_msec; - arg.dwell_time_passive = scan_time_msec; - arg.max_scan_time = scan_time_msec; - arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; - arg.burst_duration = duration; - - ret = ath11k_start_scan(ar, &arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH11K_SCAN_ID; + arg->chan_list[0] = chan->center_freq; + arg->dwell_time_active = scan_time_msec; + arg->dwell_time_passive = scan_time_msec; + arg->max_scan_time = scan_time_msec; + arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; + arg->burst_duration = duration; + + ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); @@ -8983,7 +8988,9 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, ret = 0; free_chan_list: - kfree(arg.chan_list); + kfree(arg->chan_list); +free_arg: + kfree(arg); exit: mutex_unlock(&ar->conf_mutex); return ret; @@ -9131,6 +9138,7 @@ static const struct ieee80211_ops ath11k_ops = { #endif #ifdef CONFIG_ATH11K_DEBUGFS + .vif_add_debugfs = ath11k_debugfs_op_vif_add, .sta_add_debugfs = ath11k_debugfs_sta_op_add, #endif diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 3ac689f1de..afeabd6ecc 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -333,6 +334,7 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); break; case MHI_CB_EE_RDDM: + ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n"); if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))) queue_work(ab->workqueue_aux, &ab->reset_work); break; @@ -389,16 +391,23 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) if (!mhi_ctrl) return -ENOMEM; - ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE, - ab_pci->amss_path, - sizeof(ab_pci->amss_path)); - ab_pci->mhi_ctrl = mhi_ctrl; mhi_ctrl->cntrl_dev = ab->dev; - mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; mhi_ctrl->reg_len = ab->mem_len; + if (ab->fw.amss_data && ab->fw.amss_len > 0) { + /* use MHI firmware file from firmware-N.bin */ + mhi_ctrl->fw_data = ab->fw.amss_data; + mhi_ctrl->fw_sz = ab->fw.amss_len; + } else { + /* use the old separate mhi.bin MHI firmware file */ + ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE, + ab_pci->amss_path, + sizeof(ab_pci->amss_path)); + mhi_ctrl->fw_image = ab_pci->amss_path; + } + ret = ath11k_mhi_get_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to get msi for mhi\n"); diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index c63083633b..e602d41301 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -422,14 +422,14 @@ static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc) +static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) { int i; - clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); + clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, 
&ab->dev_flags); for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath11k_pcic_ext_grp_disable(irq_grp); @@ -460,8 +460,6 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) { int i; - set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; @@ -471,6 +469,8 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) } ath11k_pcic_ext_grp_enable(irq_grp); } + + set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); } EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable); diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index 114aa3a9a3..1c79a932d1 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -446,7 +446,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) | FIELD_PREP(HTT_TCL_META_DATA_PEER_ID, peer->peer_id); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 41fad03a30..c270dc46d5 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2502,38 +2502,56 @@ out: static int ath11k_qmi_m3_load(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - const struct firmware *fw; + const struct firmware *fw = NULL; + const void *m3_data; char path[100]; + size_t m3_len; int ret; - fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); - if (IS_ERR(fw)) { - ret = PTR_ERR(fw); - ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, - path, sizeof(path)); - ath11k_err(ab, "failed to load %s: %d\n", path, ret); - return ret; - } + if (m3_mem->vaddr) + /* m3 firmware buffer is already available in the DMA buffer */ + return 0; - if (m3_mem->vaddr || m3_mem->size) - goto skip_m3_alloc; + if (ab->fw.m3_data && ab->fw.m3_len > 0) { + /* firmware-N.bin had a m3 firmware file so use that */ + m3_data = ab->fw.m3_data; + m3_len = ab->fw.m3_len; + } else { + /* No m3 file in firmware-N.bin so try to request old + * separate m3.bin. 
+ */ + fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); + if (IS_ERR(fw)) { + ret = PTR_ERR(fw); + ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, + path, sizeof(path)); + ath11k_err(ab, "failed to load %s: %d\n", path, ret); + return ret; + } + + m3_data = fw->data; + m3_len = fw->size; + } m3_mem->vaddr = dma_alloc_coherent(ab->dev, - fw->size, &m3_mem->paddr, + m3_len, &m3_mem->paddr, GFP_KERNEL); if (!m3_mem->vaddr) { ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n", fw->size); - release_firmware(fw); - return -ENOMEM; + ret = -ENOMEM; + goto out; } -skip_m3_alloc: - memcpy(m3_mem->vaddr, fw->data, fw->size); - m3_mem->size = fw->size; + memcpy(m3_mem->vaddr, m3_data, m3_len); + m3_mem->size = m3_len; + + ret = 0; + +out: release_firmware(fw); - return 0; + return ret; } static void ath11k_qmi_m3_free(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 7f9fb968da..3c7debae80 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -352,6 +352,16 @@ static u32 ath11k_map_fw_reg_flags(u16 reg_flags) return flags; } +static u32 ath11k_map_fw_phy_flags(u32 phy_flags) +{ + u32 flags = 0; + + if (phy_flags & ATH11K_REG_PHY_BITMAP_NO11AX) + flags |= NL80211_RRF_NO_HE; + + return flags; +} + static bool ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1, struct ieee80211_reg_rule *rule2) @@ -685,6 +695,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab, } flags |= ath11k_map_fw_reg_flags(reg_rule->flags); + flags |= ath11k_map_fw_phy_flags(reg_info->phybitmap); ath11k_reg_update_rule(tmp_regd->reg_rules + i, reg_rule->start_freq, diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 2f284f2637..84daa6543b 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -24,6 +24,9 @@ enum ath11k_dfs_region { ATH11K_DFS_REG_UNDEF, }; +/* Phy bitmaps */ +#define ATH11K_REG_PHY_BITMAP_NO11AX BIT(5) + /* ATH11K Regulatory API's */ void ath11k_reg_init(struct ath11k *ar); void ath11k_reg_free(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index 705868198d..0b7b7122cc 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -382,16 +382,11 @@ static ssize_t ath11k_write_file_spectral_count(struct file *file, { struct ath11k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX) return -EINVAL; @@ -437,16 +432,11 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file, { struct ath11k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val < ATH11K_SPECTRAL_MIN_BINS || val > ar->ab->hw_params.spectral.max_fft_bins) @@ -598,7 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar, return -EINVAL; } - tlv = (struct spectral_tlv *)data; + tlv = 
data; tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); /* convert Dword into bytes */ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c index 23ed01bd44..c9b012f97b 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.c +++ b/drivers/net/wireless/ath/ath11k/thermal.c @@ -125,7 +125,7 @@ ATTRIBUTE_GROUPS(ath11k_hwmon); int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state) { - struct ath11k_base *sc = ar->ab; + struct ath11k_base *ab = ar->ab; struct thermal_mitigation_params param; int ret = 0; @@ -147,14 +147,14 @@ int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state) ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, ¶m); if (ret) { - ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n", + ath11k_warn(ab, "failed to send thermal mitigation duty cycle %u ret %d\n", throttle_state, ret); } return ret; } -int ath11k_thermal_register(struct ath11k_base *sc) +int ath11k_thermal_register(struct ath11k_base *ab) { struct thermal_cooling_device *cdev; struct device *hwmon_dev; @@ -162,8 +162,8 @@ int ath11k_thermal_register(struct ath11k_base *sc) struct ath11k_pdev *pdev; int i, ret; - for (i = 0; i < sc->num_radios; i++) { - pdev = &sc->pdevs[i]; + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; @@ -172,7 +172,7 @@ int ath11k_thermal_register(struct ath11k_base *sc) &ath11k_thermal_ops); if (IS_ERR(cdev)) { - ath11k_err(sc, "failed to setup thermal device result: %ld\n", + ath11k_err(ab, "failed to setup thermal device result: %ld\n", PTR_ERR(cdev)); ret = -EINVAL; goto err_thermal_destroy; @@ -183,7 +183,7 @@ int ath11k_thermal_register(struct ath11k_base *sc) ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj, "cooling_device"); if (ret) { - ath11k_err(sc, "failed to create cooling device symlink\n"); + ath11k_err(ab, "failed to create cooling device symlink\n"); goto err_thermal_destroy; } @@ -204,18 +204,18 @@ int ath11k_thermal_register(struct ath11k_base *sc) return 0; err_thermal_destroy: - ath11k_thermal_unregister(sc); + ath11k_thermal_unregister(ab); return ret; } -void ath11k_thermal_unregister(struct ath11k_base *sc) +void ath11k_thermal_unregister(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; - for (i = 0; i < sc->num_radios; i++) { - pdev = &sc->pdevs[i]; + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h index 3e39675ef7..83cb676867 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.h +++ b/drivers/net/wireless/ath/ath11k/thermal.h @@ -26,17 +26,17 @@ struct ath11k_thermal { }; #if IS_REACHABLE(CONFIG_THERMAL) -int ath11k_thermal_register(struct ath11k_base *sc); -void ath11k_thermal_unregister(struct ath11k_base *sc); +int ath11k_thermal_register(struct ath11k_base *ab); +void ath11k_thermal_unregister(struct ath11k_base *ab); int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state); void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature); #else -static inline int ath11k_thermal_register(struct ath11k_base *sc) +static inline int ath11k_thermal_register(struct ath11k_base *ab) { return 0; } -static inline void ath11k_thermal_unregister(struct ath11k_base *sc) +static inline void ath11k_thermal_unregister(struct ath11k_base *ab) { } 
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 1c07f55c25..2845b4313d 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -292,18 +292,18 @@ err_pull: int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { - struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; might_sleep(); if (ab->hw_params.credit_flow) { - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); @@ -313,7 +313,7 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -ENOBUFS); @@ -321,10 +321,10 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, } if (ret == -EAGAIN) - ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); if (ret == -ENOBUFS) - ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n", + ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", cmd_id); return ret; @@ -611,10 +611,10 @@ static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *sk return 0; } -struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) +struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -2281,7 +2281,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; - tmp_ptr = (u32 *)ptr; + tmp_ptr = ptr; for (i = 0; i < params->num_chan; ++i) tmp_ptr[i] = params->chan_list[i]; @@ -4148,7 +4148,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, ptr += TLV_HDR_SIZE + len; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { - hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr; + hw_mode = ptr; hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, @@ -4168,7 +4168,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, len = sizeof(*band_to_mac); for (idx = 0; idx < param->num_band_to_mac; idx++) { - band_to_mac = (void *)ptr; + band_to_mac = ptr; band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BAND_TO_MAC) | @@ -4291,7 +4291,7 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, int ath11k_wmi_cmd_init(struct ath11k_base *ab) { - struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; struct wmi_init_cmd_param init_param; struct target_resource_config config; @@ -4304,12 +4304,12 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) ab->wmi_ab.svc_map)) config.is_reg_cc_ext_event_supported = 1; - memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config)); + memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config)); - 
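The ath11k_wmi_cmd_send() hunk above sleeps on tx_credits_wq with a GCC statement expression ({ ... }) as the wake-up condition, so every wake retries ath11k_wmi_cmd_send_nowait() and the wait only ends once the result is no longer -EAGAIN (or the crash-flush flag turns it into -ESHUTDOWN). The sketch below is a plain userspace stand-in that only illustrates the statement-expression idiom; try_send() and the bounded polling loop standing in for the waitqueue are assumptions, not the driver's code.

#include <stdio.h>
#include <errno.h>

static int attempts;

/* Stand-in for the no-wait send: out of credits twice, then succeeds. */
static int try_send(void)
{
    return (++attempts < 3) ? -EAGAIN : 0;
}

int main(void)
{
    int ret = -EOPNOTSUPP;
    int spins = 0;

    /* The kernel's wait_event_timeout() re-evaluates its condition on each
     * wake-up; a bounded polling loop plays that role here.  The condition
     * is a GCC statement expression whose value is its last expression. */
    while (spins++ < 10) {
        int done = ({
            ret = try_send();
            (ret != -EAGAIN);   /* wait ends only when not "try again" */
        });
        if (done)
            break;
    }

    printf("sent after %d attempts, ret=%d\n", attempts, ret);
    return ret ? 1 : 0;
}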
init_param.res_cfg = &wmi_sc->wlan_resource_config; - init_param.num_mem_chunks = wmi_sc->num_mem_chunks; - init_param.hw_mode_id = wmi_sc->preferred_hw_mode; - init_param.mem_chunks = wmi_sc->mem_chunks; + init_param.res_cfg = &wmi_ab->wlan_resource_config; + init_param.num_mem_chunks = wmi_ab->num_mem_chunks; + init_param.hw_mode_id = wmi_ab->preferred_hw_mode; + init_param.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -4317,7 +4317,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) init_param.num_band_to_mac = ab->num_radios; ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); - return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); + return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param); } int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, @@ -5440,10 +5440,11 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "cc_ext %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d", + "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, - reg_info->min_bw_5ghz, reg_info->max_bw_5ghz); + reg_info->min_bw_5ghz, reg_info->max_bw_5ghz, + reg_info->phybitmap); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", @@ -6452,7 +6453,7 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, goto exit; } - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); @@ -6540,7 +6541,7 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, arvif->bssid, NULL); if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); arsta->rssi_beacon = src->beacon_snr; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d snr %d\n", @@ -7222,14 +7223,12 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, memset(&fixed_param, 0, sizeof(fixed_param)); memcpy(&fixed_param, (struct wmi_ready_event *)ptr, min_t(u16, sizeof(fixed_param), len)); - ab->wlan_init_status = fixed_param.ready_event_min.status; rdy_parse->num_extra_mac_addr = fixed_param.ready_event_min.num_extra_mac_addr; ether_addr_copy(ab->mac_addr, fixed_param.ready_event_min.mac_addr.addr); ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; - ab->wmi_ready = true; break; case WMI_TAG_ARRAY_FIXED_STRUCT: addr_list = (struct wmi_mac_addr *)ptr; @@ -7469,7 +7468,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, goto exit; } - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 3df8059d55..6c01b282fc 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -19,6 +19,27 @@ unsigned int ath12k_debug_mask; module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); +static int ath12k_core_rfkill_config(struct ath12k_base *ab) +{ + struct ath12k *ar; + int ret = 0, i; + + if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + + ret = ath12k_mac_rfkill_config(ar); + if (ret && ret != -EOPNOTSUPP) { + ath12k_warn(ab, "failed to configure rfkill: %d", ret); + return ret; + } + } + + return ret; +} + int ath12k_core_suspend(struct ath12k_base *ab) { int ret; @@ -339,6 +360,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) { char boardname[BOARD_NAME_SIZE]; + int bd_api; int ret; ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); @@ -347,12 +369,12 @@ int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) return ret; } - ab->bd_api = 2; + bd_api = 2; ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname); if (!ret) goto success; - ab->bd_api = 1; + bd_api = 1; ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE); if (ret) { ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", @@ -361,7 +383,7 @@ int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) } success: - ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api); return 0; } @@ -377,6 +399,75 @@ static void ath12k_core_stop(struct ath12k_base *ab) /* De-Init of components as needed */ } +static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath12k_base *ab = data; + const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC; + struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr; + ssize_t copied; + size_t len; + int i; + + if (ab->qmi.target.bdf_ext[0] != '\0') + return; + + if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + if (!smbios->bdf_enabled) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + len = min_t(size_t, + strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); + for (i = 0; i < len; i++) { + if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic prefix */ + copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), + sizeof(ab->qmi.target.bdf_ext)); + if (copied < 0) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate\n"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext); +} + +int ath12k_core_check_smbios(struct ath12k_base *ab) +{ + ab->qmi.target.bdf_ext[0] = '\0'; + dmi_walk(ath12k_core_check_bdfext, ab); + + if (ab->qmi.target.bdf_ext[0] == '\0') + return -ENODATA; + + 
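ath12k_core_check_smbios() above hands dmi_walk() a callback plus the ab pointer as opaque context; the callback filters by SMBIOS type and length, checks the BDF_ magic, and copies the variant name with strscpy() so truncation is detected. The following sketch mirrors only the callback-plus-context shape; walk_entries(), struct entry and the sample table contents are hypothetical stand-ins for dmi_walk() and the DMI table.

#include <stdio.h>
#include <string.h>

struct entry {
    int type;
    const char *text;
};

struct ctx {
    char variant[16];
};

/* Stand-in for dmi_walk(): call cb for every table entry with opaque data. */
static void walk_entries(const struct entry *tbl, int n,
                         void (*cb)(const struct entry *, void *), void *data)
{
    for (int i = 0; i < n; i++)
        cb(&tbl[i], data);
}

static void check_bdf_ext(const struct entry *e, void *data)
{
    struct ctx *c = data;
    const char *magic = "BDF_";

    if (c->variant[0] != '\0')          /* already found one */
        return;
    if (e->type != 0xF8)                /* not the vendor-specific type */
        return;
    if (strncmp(e->text, magic, strlen(magic)) != 0)
        return;

    /* Copy the name without the magic prefix, bounded by the buffer. */
    snprintf(c->variant, sizeof(c->variant), "%s", e->text + strlen(magic));
}

int main(void)
{
    const struct entry table[] = {
        { 0x01, "SystemVendor" },
        { 0xF8, "BDF_QC2066" },         /* example variant name only */
    };
    struct ctx c = { .variant = "" };

    walk_entries(table, 2, check_bdf_ext, &c);
    printf("bdf variant: %s\n", c.variant[0] ? c.variant : "(none)");
    return 0;
}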
return 0; +} + static int ath12k_core_soc_create(struct ath12k_base *ab) { int ret; @@ -603,10 +694,19 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) goto err_core_stop; } ath12k_hif_irq_enable(ab); + + ret = ath12k_core_rfkill_config(ab); + if (ret && ret != -EOPNOTSUPP) { + ath12k_err(ab, "failed to config rfkill: %d\n", ret); + goto err_core_pdev_destroy; + } + mutex_unlock(&ab->core_lock); return 0; +err_core_pdev_destroy: + ath12k_core_pdev_destroy(ab); err_core_stop: ath12k_core_stop(ab); ath12k_mac_destroy(ab); @@ -655,6 +755,27 @@ err_hal_srng_deinit: return ret; } +static void ath12k_rfkill_work(struct work_struct *work) +{ + struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work); + struct ath12k *ar; + bool rfkill_radio_on; + int i; + + spin_lock_bh(&ab->base_lock); + rfkill_radio_on = ab->rfkill_radio_on; + spin_unlock_bh(&ab->base_lock); + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + if (!ar) + continue; + + ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on); + wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on); + } +} + void ath12k_core_halt(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -668,6 +789,7 @@ void ath12k_core_halt(struct ath12k *ar) ath12k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ab->rfkill_work); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); @@ -685,6 +807,9 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) ab->stats.fw_crash_counter++; spin_unlock_bh(&ab->base_lock); + if (ab->is_reset) + set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); + for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; @@ -823,6 +948,8 @@ static void ath12k_core_reset(struct work_struct *work) ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; + atomic_set(&ab->recovery_start_count, 0); + reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_count, 0); ath12k_core_pre_reconfigure_recovery(ab); @@ -830,15 +957,13 @@ static void ath12k_core_reset(struct work_struct *work) reinit_completion(&ab->reconfigure_complete); ath12k_core_post_reconfigure_recovery(ab); - reinit_completion(&ab->recovery_start); - atomic_set(&ab->recovery_start_count, 0); - ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n"); time_left = wait_for_completion_timeout(&ab->recovery_start, ATH12K_RECOVER_START_TIMEOUT_HZ); ath12k_hif_power_down(ab); + ath12k_qmi_free_resource(ab); ath12k_hif_power_up(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n"); @@ -922,6 +1047,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); INIT_WORK(&ab->restart_work, ath12k_core_restart); INIT_WORK(&ab->reset_work, ath12k_core_reset); + INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work); + timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index d873b573da..68c42ca44f 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -32,6 +34,15 @@ /* Pending management packets threshold for dropping probe responses */ #define ATH12K_PRB_RSP_DROP_THRESHOLD 
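ath12k_rfkill_work() above holds base_lock only long enough to snapshot rfkill_radio_on, then applies the state to every radio outside the lock, since the per-radio calls issue WMI commands and may sleep. Below is a minimal userspace analogue of that snapshot-then-act pattern; the pthread mutex stands in for the driver's spinlock and the radio count and apply function are hypothetical.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool radio_on;                   /* shared state, written elsewhere */

static void apply_to_radio(int idx, bool on)
{
    /* In the driver this sends commands to firmware and may sleep, so it
     * must run without the spinlock held. */
    printf("radio %d -> %s\n", idx, on ? "on" : "off");
}

static void rfkill_work(int num_radios)
{
    bool on;

    pthread_mutex_lock(&state_lock);
    on = radio_on;                      /* take a consistent snapshot ... */
    pthread_mutex_unlock(&state_lock);

    for (int i = 0; i < num_radios; i++)   /* ... then act without the lock */
        apply_to_radio(i, on);
}

int main(void)
{
    pthread_mutex_lock(&state_lock);
    radio_on = true;
    pthread_mutex_unlock(&state_lock);

    rfkill_work(2);
    return 0;
}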
((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH12K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH12K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* The magic used by QCA spec */ +#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_" + #define ATH12K_INVALID_HW_MAC_ID 0xFF #define ATH12K_RX_RATE_TABLE_NUM 320 #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 @@ -129,6 +140,13 @@ struct ath12k_ext_irq_grp { struct net_device napi_ndev; }; +struct ath12k_smbios_bdf { + struct dmi_header hdr; + u32 padding; + u8 bdf_enabled; + u8 bdf_ext[]; +} __packed; + #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 #define HECAP_MACINFO_SIZE 5 @@ -719,7 +737,6 @@ struct ath12k_base { struct ath12k_wmi_target_cap_arg target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; - int bd_api; const struct ath12k_hw_params *hw_params; @@ -771,6 +788,10 @@ struct ath12k_base { u64 fw_soc_drop_count; bool static_window_map; + struct work_struct rfkill_work; + /* true means radio is on */ + bool rfkill_radio_on; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; @@ -788,7 +809,8 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, struct ath12k_board_data *bd); void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); - +int ath12k_core_check_dt(struct ath12k_base *ath12k); +int ath12k_core_check_smbios(struct ath12k_base *ab); void ath12k_core_halt(struct ath12k *ar); int ath12k_core_resume(struct ath12k_base *ab); int ath12k_core_suspend(struct ath12k_base *ab); @@ -830,6 +852,11 @@ static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif) return (struct ath12k_vif *)vif->drv_priv; } +static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta) +{ + return (struct ath12k_sta *)sta->drv_priv; +} + static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab, int mac_id) { diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c index 67893923e0..45d33279e6 100644 --- a/drivers/net/wireless/ath/ath12k/debug.c +++ b/drivers/net/wireless/ath/ath12k/debug.c @@ -64,7 +64,7 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask, vaf.va = &args; if (ath12k_debug_mask & mask) - dev_dbg(ab->dev, "%pV", &vaf); + dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); /* TODO: trace log */ diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index f1e57e98bd..f44bc5494c 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -13,8 +13,7 @@ static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->usr_resp_ref); @@ -23,13 +22,12 @@ static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, } static void -ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, +ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats, + void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; - u32 
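core.h above adds ath12k_sta_to_arsta(), a one-line inline that replaces the scattered (struct ath12k_sta *)sta->drv_priv casts seen in the later hunks. The private area is a flexible array at the end of the mac80211 object, so a cast is the only way in; wrapping it keeps call sites uniform and easy to grep. A self-contained sketch of the same accessor shape, with all type names as stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the mac80211 object: driver-private bytes trail the struct. */
struct sta {
    int aid;
    char drv_priv[] __attribute__((aligned(sizeof(void *))));
};

/* Stand-in for the driver's per-station state kept inside drv_priv. */
struct arsta {
    int rssi;
};

static inline struct arsta *sta_to_arsta(struct sta *s)
{
    return (struct arsta *)s->drv_priv;   /* single place that knows the layout */
}

int main(void)
{
    struct sta *s = calloc(1, sizeof(*s) + sizeof(struct arsta));

    sta_to_arsta(s)->rssi = -42;
    printf("aid=%d rssi=%d\n", s->aid, sta_to_arsta(s)->rssi);
    free(s);
    return 0;
}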
mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt); - u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt); + u32 mpdu_ok_byte_count = __le32_to_cpu(stats->mpdu_ok_cnt); + u32 mpdu_err_byte_count = __le32_to_cpu(stats->mpdu_err_cnt); rx_user_status->mpdu_ok_byte_count = u32_get_bits(mpdu_ok_byte_count, @@ -2376,7 +2374,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, return; } - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + arsta = ath12k_sta_to_arsta(peer->sta); rx_stats = arsta->rx_stats; if (!rx_stats) @@ -2552,7 +2550,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, } if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + arsta = ath12k_sta_to_arsta(peer->sta); ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, ppdu_info); } else if ((ppdu_info->fc_valid) && diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index dbcbe7e0cd..3543fadac4 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1054,7 +1054,7 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar, struct ieee80211_ampdu_params *params) { struct ath12k_base *ab = ar->ab; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret; @@ -1072,7 +1072,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; bool active; int ret; @@ -1410,7 +1410,7 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, } sta = peer->sta; - arsta = (struct ath12k_sta *)sta->drv_priv; + arsta = ath12k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); @@ -3529,23 +3529,13 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { struct ath12k_base *ab = ar->ab; - u16 msdu_len, peer_id; + u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); - peer_id = ath12k_dp_rx_h_peer_id(ab, desc); - - spin_lock(&ab->base_lock); - if (!ath12k_peer_find_by_id(ab, peer_id)) { - spin_unlock(&ab->base_lock); - ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n", - peer_id); - return -EINVAL; - } - spin_unlock(&ab->base_lock); if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct it's length */ @@ -3759,7 +3749,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, continue; } - desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc; + desc_info = err_info.rx_desc; /* retry manual desc retrieval if hw cc is not done */ if (!desc_info) { diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 16d889fc20..492ca6ce67 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -106,11 +106,10 @@ static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp * return desc; } -static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, +static void 
ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, + struct hal_tx_msdu_ext_desc *tcl_ext_cmd, struct hal_tx_info *ti) { - struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd; - tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr, HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO); tcl_ext_cmd->info1 = le32_encode_bits(0x0, @@ -402,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, } } - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ar->hw, msdu); } static void @@ -499,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, * Might end up reporting it out-of-band from HTT stats. */ - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ar->hw, msdu); exit: rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index e7a150e715..b896dfe66d 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -385,13 +385,13 @@ static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { return le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_TID); + RX_MSDU_END_QCN9274_INFO5_TID); } static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -819,13 +819,13 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { - return le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_TID); + return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_TID); } static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -837,7 +837,7 @@ static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end, - sizeof(struct rx_msdu_end_qcn9274)); + sizeof(struct rx_msdu_end_wcn7850)); } static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) @@ -889,8 +889,8 @@ static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) { - return __le16_to_cpu(desc->u.wcn7850.msdu_end.info5) & - RX_MSDU_END_INFO5_DA_IS_MCBC; + return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) & + RX_MSDU_END_INFO13_MCAST_BCAST; } static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c index ee61a6462f..f6afbd8196 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.c +++ b/drivers/net/wireless/ath/ath12k/hal_rx.c @@ -713,8 +713,6 @@ void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, { struct hal_rx_reo_queue_ext *ext_desc; - memset(qdesc, 0, sizeof(*qdesc)); - ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0); diff --git 
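The hal.c hunks above switch the WCN7850 receive-descriptor accessors to chip-specific field masks (e.g. RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP) because the same descriptor dword carries fields at different bit positions on QCN9274 and WCN7850. The sketch below shows the mask-and-shift extraction those le32_get_bits() helpers perform, using simplified local macros rather than the kernel's GENMASK()/FIELD_GET():

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK() / FIELD_GET(). */
#define MASK(hi, lo)          (((~0u) >> (31 - (hi))) & (~0u << (lo)))
#define GET_FIELD(mask, val)  (((val) & (mask)) >> __builtin_ctz(mask))

/* Same logical field, different bit positions per chip generation
 * (positions here mirror the 30:24 vs 31:24 split in the hunk). */
#define CHIP_A_NSS_MASK       MASK(30, 24)
#define CHIP_B_NSS_MASK       MASK(31, 24)

int main(void)
{
    uint32_t info12 = 0xc3000000;   /* example descriptor dword */

    printf("chip A nss bitmap: 0x%x\n", GET_FIELD(CHIP_A_NSS_MASK, info12));
    printf("chip B nss bitmap: 0x%x\n", GET_FIELD(CHIP_B_NSS_MASK, info12));
    return 0;
}

Reading the same word with the wrong chip's mask silently returns a truncated or shifted value, which is exactly the class of bug the per-chip macro split above avoids.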
a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h index 54490cdb63..4095fd82b1 100644 --- a/drivers/net/wireless/ath/ath12k/hif.h +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -10,17 +10,17 @@ #include "core.h" struct ath12k_hif_ops { - u32 (*read32)(struct ath12k_base *sc, u32 address); - void (*write32)(struct ath12k_base *sc, u32 address, u32 data); - void (*irq_enable)(struct ath12k_base *sc); - void (*irq_disable)(struct ath12k_base *sc); - int (*start)(struct ath12k_base *sc); - void (*stop)(struct ath12k_base *sc); - int (*power_up)(struct ath12k_base *sc); - void (*power_down)(struct ath12k_base *sc); + u32 (*read32)(struct ath12k_base *ab, u32 address); + void (*write32)(struct ath12k_base *ab, u32 address, u32 data); + void (*irq_enable)(struct ath12k_base *ab); + void (*irq_disable)(struct ath12k_base *ab); + int (*start)(struct ath12k_base *ab); + void (*stop)(struct ath12k_base *ab); + int (*power_up)(struct ath12k_base *ab); + void (*power_down)(struct ath12k_base *ab); int (*suspend)(struct ath12k_base *ab); int (*resume)(struct ath12k_base *ab); - int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id, + int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index 5991cc91cd..b55cf33e37 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -886,7 +886,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .idle_ps = false, @@ -907,6 +908,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0, }, { .name = "wcn7850 hw2.0", @@ -942,7 +949,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .rx_mac_buf_ring = true, .vdev_start_delay = true, - .interface_modes = BIT(NL80211_IFTYPE_STATION), + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), .supports_monitor = false, .idle_ps = true, @@ -964,6 +972,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) | BIT(CNSS_PCIE_PERST_NO_PULL_V01), + + .rfkill_pin = 48, + .rfkill_cfg = 0, + .rfkill_on_level = 1, + + .rddm_size = 0x780000, }, { .name = "qcn9274 hw2.0", @@ -998,7 +1012,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .idle_ps = false, @@ -1019,6 +1034,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0, }, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index e6c4223c28..2d6427cf41 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -186,6 +186,12 @@ struct 
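The hif.h hunk above only renames parameters, but struct ath12k_hif_ops itself is the indirection that lets each bus backend (PCI, etc.) supply its own register access and IRQ handling behind one interface. A compact sketch of that ops-table shape with hypothetical bus implementations:

#include <stdint.h>
#include <stdio.h>

struct dev;

struct bus_ops {
    uint32_t (*read32)(struct dev *d, uint32_t addr);
    void (*write32)(struct dev *d, uint32_t addr, uint32_t val);
};

struct dev {
    const struct bus_ops *ops;
    uint32_t regs[16];              /* fake register file for the sketch */
};

static uint32_t pci_read32(struct dev *d, uint32_t addr)
{
    return d->regs[addr & 0xf];
}

static void pci_write32(struct dev *d, uint32_t addr, uint32_t val)
{
    d->regs[addr & 0xf] = val;
}

static const struct bus_ops pci_ops = {
    .read32  = pci_read32,
    .write32 = pci_write32,
};

int main(void)
{
    struct dev d = { .ops = &pci_ops };

    /* Common code never knows which bus backs the device. */
    d.ops->write32(&d, 0x4, 0xabcd);
    printf("reg 0x4 = 0x%x\n", d.ops->read32(&d, 0x4));
    return 0;
}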
ath12k_hw_params { const struct hal_ops *hal_ops; u64 qmi_cnss_feature_bitmap; + + u32 rfkill_pin; + u32 rfkill_cfg; + u32 rfkill_on_level; + + u32 rddm_size; }; struct ath12k_hw_ops { diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 88346e66bb..b698e55a5b 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -523,7 +523,7 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath12k_vif_iter *arvif_iter = data; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; @@ -1208,7 +1208,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -1236,7 +1236,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; @@ -1294,7 +1294,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -1357,7 +1357,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, struct ath12k_wmi_peer_assoc_arg *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -1518,7 +1518,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, struct ath12k_wmi_peer_assoc_arg *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u16 *vht_mcs_mask; @@ -1793,7 +1793,7 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -1991,7 +1991,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2140,7 +2140,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; const struct ieee80211_eht_mcs_nss_supp_bw *bw; - struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); u32 *rx_mcs, *tx_mcs; if 
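The hw.h/hw.c changes above grow the per-chip parameter table with rfkill_pin, rfkill_cfg, rfkill_on_level and rddm_size, so chips without an rfkill GPIO simply leave the fields zero and common code treats that as "not supported". A minimal sketch of a const parameter table consumed that way; the chip names are illustrative, while the pin 48 / 0x780000 values echo the wcn7850 entry in the hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hw_params {
    const char *name;
    uint32_t rfkill_pin;    /* 0 means the chip has no rfkill GPIO */
    uint32_t rddm_size;
};

static const struct hw_params params_table[] = {
    { .name = "chip-a", .rfkill_pin = 0,  .rddm_size = 0 },
    { .name = "chip-b", .rfkill_pin = 48, .rddm_size = 0x780000 },
};

static const struct hw_params *lookup(const char *name)
{
    for (size_t i = 0; i < sizeof(params_table) / sizeof(params_table[0]); i++)
        if (strcmp(params_table[i].name, name) == 0)
            return &params_table[i];
    return NULL;
}

int main(void)
{
    const struct hw_params *hw = lookup("chip-b");

    if (hw && hw->rfkill_pin)
        printf("%s: rfkill on GPIO %u, rddm 0x%x\n",
               hw->name, hw->rfkill_pin, hw->rddm_size);
    else
        printf("rfkill not supported\n");
    return 0;
}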
(!sta->deflink.he_cap.has_he || !eht_cap->has_eht) @@ -2266,7 +2266,7 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_wmi_peer_assoc_arg peer_arg; struct ieee80211_sta *ap_sta; struct ath12k_peer *peer; @@ -2360,7 +2360,7 @@ static void ath12k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -2407,7 +2407,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; @@ -2525,7 +2525,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) { param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; - param_value = WMI_BEACON_STAGGERED_MODE; + param_value = WMI_BEACON_BURST_MODE; ret = ath12k_wmi_pdev_set_param(ar, param_id, param_value, ar->pdev->pdev_id); if (ret) @@ -2533,7 +2533,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id); else ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "Set staggered beacon mode for VDEV: %d\n", + "Set burst beacon mode for VDEV: %d\n", arvif->vdev_id); ret = ath12k_mac_setup_bcn_tmpl(arvif); @@ -2761,9 +2761,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_FILS_DISCOVERY || - changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) - ath12k_mac_fils_discovery(arvif, info); + ath12k_mac_fils_discovery(arvif, info); if (changed & BSS_CHANGED_EHT_PUNCTURING) arvif->punct_bitmap = info->eht_puncturing; @@ -2780,18 +2778,21 @@ void __ath12k_mac_scan_finish(struct ath12k *ar) break; case ATH12K_SCAN_RUNNING: case ATH12K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH12K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH12K_SCAN_ABORTING), + .aborted = ((ar->scan.state == + ATH12K_SCAN_ABORTING) || + (ar->scan.state == + ATH12K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); } - fallthrough; - case ATH12K_SCAN_STARTING: + ar->scan.state = ATH12K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; @@ -3246,7 +3247,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = (struct ath12k_sta *)sta->drv_priv; + arsta = ath12k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -3419,7 +3420,7 @@ static int ath12k_station_disassoc(struct ath12k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -3636,7 +3637,7 @@ static int ath12k_mac_station_add(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = (struct 
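The reworked __ath12k_mac_scan_finish() above lets the RUNNING/ABORTING cases report a remain-on-channel expiry and then fall through into the STARTING case, where the shared "report as aborted unless completed, reset to IDLE" handling lives. A small stand-in sketch of that switch-with-fallthrough state handling (state names and prints are illustrative only):

#include <stdio.h>

enum scan_state { IDLE, STARTING, RUNNING, ABORTING };

static void scan_finish(enum scan_state *state)
{
    switch (*state) {
    case IDLE:
        break;
    case RUNNING:
    case ABORTING:
        printf("notify listeners the scan ended\n");
        /* fall through: the cleanup below is shared with STARTING */
    case STARTING:
        printf("completed, aborted=%d\n",
               *state == ABORTING || *state == STARTING);
        *state = IDLE;
        break;
    }
}

int main(void)
{
    enum scan_state s = STARTING;

    scan_finish(&s);
    printf("state now %d\n", s);
    return 0;
}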
ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_wmi_peer_create_arg peer_param; int ret; @@ -3743,7 +3744,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_peer *peer; int ret = 0; @@ -3855,7 +3856,7 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; s16 txpwr; @@ -3891,8 +3892,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, u32 changed) { struct ath12k *ar = hw->priv; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; u32 bw, smps; @@ -4018,7 +4019,7 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; @@ -4553,7 +4554,50 @@ static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg * } } -static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, +static void +ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed + *eht_cap_elem) +{ + u8 m; + + m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS; + eht_cap_elem->mac_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO; + eht_cap_elem->phy_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + eht_cap_elem->phy_cap_info[3] &= ~m; + + m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI; + eht_cap_elem->phy_cap_info[4] &= ~m; + + m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[5] &= ~m; + + m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[6] &= ~m; + + m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; + eht_cap_elem->phy_cap_info[7] &= ~m; +} + +static void ath12k_mac_copy_eht_cap(struct ath12k *ar, + struct ath12k_band_cap *band_cap, struct ieee80211_he_cap_elem *he_cap_elem, int iftype, struct ieee80211_sta_eht_cap *eht_cap) @@ -4561,6 +4605,10 @@ static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); + + if (!(test_bit(WMI_TLV_SERVICE_11BE, 
ar->ab->wmi_ab.svc_map))) + return; + eht_cap->has_eht = true; memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info, sizeof(eht_cap_elem->mac_cap_info)); @@ -4586,6 +4634,9 @@ static void ath12k_mac_copy_eht_cap(struct ath12k_band_cap *band_cap, IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); break; + case NL80211_IFTYPE_MESH_POINT: + ath12k_mac_filter_eht_cap_mesh(eht_cap_elem); + break; default: break; } @@ -4626,7 +4677,7 @@ static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar, data[idx].he_6ghz_capa.capa = ath12k_mac_setup_he_6ghz_cap(cap, band_cap); } - ath12k_mac_copy_eht_cap(band_cap, &he_cap->he_cap_elem, i, + ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i, &data[idx].eht_cap); idx++; } @@ -4647,8 +4698,8 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { @@ -4657,8 +4708,8 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && @@ -4668,8 +4719,8 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, ar->mac.iftype[band], band); sband = &ar->mac.sbands[band]; - sband->iftype_data = ar->mac.iftype[band]; - sband->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } } @@ -5108,6 +5159,63 @@ err: return ret; } +int ath12k_mac_rfkill_config(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + u32 param; + int ret; + + if (ab->hw_params->rfkill_pin == 0) + return -EOPNOTSUPP; + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", + ab->hw_params->rfkill_pin, ab->hw_params->rfkill_cfg, + ab->hw_params->rfkill_on_level); + + param = u32_encode_bits(ab->hw_params->rfkill_on_level, + WMI_RFKILL_CFG_RADIO_LEVEL) | + u32_encode_bits(ab->hw_params->rfkill_pin, + WMI_RFKILL_CFG_GPIO_PIN_NUM) | + u32_encode_bits(ab->hw_params->rfkill_cfg, + WMI_RFKILL_CFG_PIN_AS_GPIO); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ab, + "failed to set rfkill config 0x%x: %d\n", + param, ret); + return ret; + } + + return 0; +} + +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) +{ + enum wmi_rfkill_enable_radio param; + int ret; + + if (enable) + param = WMI_RFKILL_ENABLE_RADIO_ON; + else + param = WMI_RFKILL_ENABLE_RADIO_OFF; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d", + ar->pdev_idx, param); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", + param, ret); + return ret; + } + + return 0; +} + static void ath12k_mac_op_stop(struct ieee80211_hw *hw) { struct ath12k *ar = hw->priv; @@ -5128,6 +5236,7 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ar->ab->rfkill_work); spin_lock_bh(&ar->data_lock); 
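ath12k_mac_rfkill_config() above builds a single WMI parameter by OR-ing three u32_encode_bits() results (on-level, GPIO pin number, pin-as-GPIO config) and bails out with -EOPNOTSUPP when the hw_params pin is zero. The sketch below shows the same field-compose step with simplified local encode macros; the mask positions are invented for illustration and do not reflect the firmware's layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GENMASK() / u32_encode_bits(). */
#define MASK(hi, lo)        (((~0u) >> (31 - (hi))) & (~0u << (lo)))
#define ENCODE(val, mask)   (((val) << __builtin_ctz(mask)) & (mask))

/* Illustrative field positions only. */
#define CFG_RADIO_LEVEL     MASK(0, 0)
#define CFG_GPIO_PIN_NUM    MASK(6, 1)
#define CFG_PIN_AS_GPIO     MASK(10, 7)

int main(void)
{
    uint32_t pin = 48, cfg = 0, on_level = 1;

    if (pin == 0) {
        printf("rfkill not supported\n");   /* mirrors the -EOPNOTSUPP early out */
        return 0;
    }

    uint32_t param = ENCODE(on_level, CFG_RADIO_LEVEL) |
                     ENCODE(pin, CFG_GPIO_PIN_NUM) |
                     ENCODE(cfg, CFG_PIN_AS_GPIO);

    printf("rfkill config word: 0x%x\n", param);
    return 0;
}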
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { @@ -5788,14 +5897,68 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } +static enum wmi_phy_mode +ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, + enum wmi_phy_mode mode, + enum nl80211_band band, + enum nl80211_iftype type) +{ + struct ieee80211_sta_eht_cap *eht_cap; + enum wmi_phy_mode down_mode; + + if (mode < MODE_11BE_EHT20) + return mode; + + eht_cap = &ar->mac.iftype[band][type].eht_cap; + if (eht_cap->has_eht) + return mode; + + switch (mode) { + case MODE_11BE_EHT20: + down_mode = MODE_11AX_HE20; + break; + case MODE_11BE_EHT40: + down_mode = MODE_11AX_HE40; + break; + case MODE_11BE_EHT80: + down_mode = MODE_11AX_HE80; + break; + case MODE_11BE_EHT80_80: + down_mode = MODE_11AX_HE80_80; + break; + case MODE_11BE_EHT160: + case MODE_11BE_EHT160_160: + case MODE_11BE_EHT320: + down_mode = MODE_11AX_HE160; + break; + case MODE_11BE_EHT20_2G: + down_mode = MODE_11AX_HE20_2G; + break; + case MODE_11BE_EHT40_2G: + down_mode = MODE_11AX_HE40_2G; + break; + default: + down_mode = mode; + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev start phymode %s downgrade to %s\n", + ath12k_mac_phymode_str(mode), + ath12k_mac_phymode_str(down_mode)); + + return down_mode; +} + static int ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef, + struct ieee80211_chanctx_conf *ctx, bool restart) { struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; + const struct cfg80211_chan_def *chandef = &ctx->def; int he_support = arvif->vif->bss_conf.he_support; int ret; @@ -5813,6 +5976,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.band_center_freq2 = chandef->center_freq2; arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; + arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, + chandef->chan->band, + arvif->vif->type); arg.min_power = 0; arg.max_power = chandef->chan->max_power * 2; arg.max_reg_power = chandef->chan->max_reg_power * 2; @@ -5829,6 +5995,8 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, /* For now allow DFS for AP mode */ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.freq2_radar = ctx->radar_enabled; + arg.passive = arg.chan_radar; spin_lock_bh(&ab->base_lock); @@ -5936,15 +6104,15 @@ err: } static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef) + struct ieee80211_chanctx_conf *ctx) { - return ath12k_mac_vdev_start_restart(arvif, chandef, false); + return ath12k_mac_vdev_start_restart(arvif, ctx, false); } static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef) + struct ieee80211_chanctx_conf *ctx) { - return ath12k_mac_vdev_start_restart(arvif, chandef, true); + return ath12k_mac_vdev_start_restart(arvif, ctx, true); } struct ath12k_mac_change_chanctx_arg { @@ -6000,7 +6168,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, lockdep_assert_held(&ar->conf_mutex); for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; + arvif = ath12k_vif_to_arvif(vifs[i].vif); if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) monitor_vif = true; @@ -6034,18 +6202,33 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, /* TODO: Update ar->rx_channel */ for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; + arvif = ath12k_vif_to_arvif(vifs[i].vif); if 
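ath12k_mac_check_down_grade_phy_mode() above maps each EHT (802.11be) phymode onto its HE (802.11ax) counterpart whenever the interface type has no EHT capability, instead of asking firmware to start a vdev in a mode the band/iftype combination cannot support. A compact sketch of that downgrade mapping with stand-in enum values:

#include <stdio.h>

enum phy_mode {
    MODE_HE20, MODE_HE40, MODE_HE80, MODE_HE160,
    MODE_EHT20, MODE_EHT40, MODE_EHT80, MODE_EHT160, MODE_EHT320,
};

static enum phy_mode downgrade(enum phy_mode mode, int has_eht)
{
    if (mode < MODE_EHT20 || has_eht)
        return mode;                    /* nothing to downgrade */

    switch (mode) {
    case MODE_EHT20:  return MODE_HE20;
    case MODE_EHT40:  return MODE_HE40;
    case MODE_EHT80:  return MODE_HE80;
    case MODE_EHT160:
    case MODE_EHT320: return MODE_HE160;   /* HE tops out at 160 MHz */
    default:          return mode;
    }
}

int main(void)
{
    printf("EHT320 without EHT cap -> mode %d\n", downgrade(MODE_EHT320, 0));
    printf("EHT320 with EHT cap    -> mode %d\n", downgrade(MODE_EHT320, 1));
    return 0;
}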
(WARN_ON(!arvif->is_started)) continue; - if (WARN_ON(!arvif->is_up)) - continue; + /* Firmware expect vdev_restart only if vdev is up. + * If vdev is down then it expect vdev_stop->vdev_start. + */ + if (arvif->is_up) { + ret = ath12k_mac_vdev_restart(arvif, vifs[i].new_ctx); + if (ret) { + ath12k_warn(ab, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } else { + ret = ath12k_mac_vdev_stop(arvif); + if (ret) { + ath12k_warn(ab, "failed to stop vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } - ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); - if (ret) { - ath12k_warn(ab, "failed to restart vdev %d: %d\n", - arvif->vdev_id, ret); + ret = ath12k_mac_vdev_start(arvif, vifs[i].new_ctx); + if (ret) + ath12k_warn(ab, "failed to start vdev %d: %d\n", + arvif->vdev_id, ret); continue; } @@ -6118,7 +6301,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) goto unlock; - if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) + if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || + changed & IEEE80211_CHANCTX_CHANGE_RADAR) ath12k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ @@ -6132,13 +6316,13 @@ static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) return -EBUSY; - ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def); + ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -6168,7 +6352,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; struct ath12k_wmi_peer_create_arg param; @@ -6196,8 +6380,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, } if (ab->hw_params->vdev_start_delay && - (arvif->vdev_type == WMI_VDEV_TYPE_AP || - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) { + arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { param.vdev_id = arvif->vdev_id; param.peer_type = WMI_PEER_TYPE_DEFAULT; param.peer_addr = ar->mac_addr; @@ -6218,7 +6402,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - ret = ath12k_mac_vdev_start(arvif, &ctx->def); + ret = ath12k_mac_vdev_start(arvif, ctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -6247,7 +6431,7 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; mutex_lock(&ar->conf_mutex); @@ -6578,7 +6762,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath12k_vif *arvif = data; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); @@ -6610,7 +6794,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath12k_vif *arvif = (void 
*)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath12k *ar = arvif->ar; enum nl80211_band band; @@ -6867,7 +7051,7 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); @@ -7231,6 +7415,11 @@ static int __ath12k_mac_register(struct ath12k *ar) ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes; + if (ar->hw->wiphy->bands[NL80211_BAND_2GHZ] && + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] && + ar->hw->wiphy->bands[NL80211_BAND_6GHZ]) + ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS); + ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 7b16b70df4..59b4e8f5ee 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -73,4 +73,6 @@ int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw); enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher); +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable); +int ath12k_mac_rfkill_config(struct ath12k *ar); #endif diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index f83d3e09ae..39e640293c 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -366,6 +366,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; mhi_ctrl->reg_len = ab->mem_len; + mhi_ctrl->rddm_size = ab->hw_params->rddm_size; ret = ath12k_mhi_get_msi(ab_pci); if (ret) { diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index fae5dfd6e9..3006cd3fbe 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -424,12 +424,12 @@ static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) +static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) { int i; for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath12k_pci_ext_grp_disable(irq_grp); diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index b2db0436bd..f6e949c618 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -2213,6 +2213,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) struct qmi_txn txn = {}; unsigned int board_id = ATH12K_BOARD_ID_DEFAULT; int ret = 0; + int r; int i; memset(&req, 0, sizeof(req)); @@ -2297,6 +2298,10 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) ab->qmi.target.fw_build_timestamp, ab->qmi.target.fw_build_id); + r = ath12k_core_check_smbios(ab); + if (r) + ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); + out: return 
ret; } @@ -2535,6 +2540,7 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab) dma_free_coherent(ab->dev, m3_mem->size, m3_mem->vaddr, m3_mem->paddr); m3_mem->vaddr = NULL; + m3_mem->size = 0; } static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) @@ -3088,3 +3094,9 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab) ath12k_qmi_m3_free(ab); ath12k_qmi_free_target_mem_chunk(ab); } + +void ath12k_qmi_free_resource(struct ath12k_base *ab) +{ + ath12k_qmi_free_target_mem_chunk(ab); + ath12k_qmi_m3_free(ab); +} diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index 15944f5f33..e20d6511d1 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -564,5 +564,6 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab, void ath12k_qmi_firmware_stop(struct ath12k_base *ab); void ath12k_qmi_deinit_service(struct ath12k_base *ab); int ath12k_qmi_init_service(struct ath12k_base *ab); +void ath12k_qmi_free_resource(struct ath12k_base *ab); #endif diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c index 6ede91ebc8..5c006256c8 100644 --- a/drivers/net/wireless/ath/ath12k/reg.c +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -314,6 +314,19 @@ static u32 ath12k_map_fw_reg_flags(u16 reg_flags) return flags; } +static u32 ath12k_map_fw_phy_flags(u32 phy_flags) +{ + u32 flags = 0; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11AX) + flags |= NL80211_RRF_NO_HE; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11BE) + flags |= NL80211_RRF_NO_EHT; + + return flags; +} + static bool ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1, struct ieee80211_reg_rule *rule2) @@ -638,6 +651,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab, } flags |= ath12k_map_fw_reg_flags(reg_rule->flags); + flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap); ath12k_reg_update_rule(tmp_regd->reg_rules + i, reg_rule->start_freq, diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h index 56d009a472..35569f0304 100644 --- a/drivers/net/wireless/ath/ath12k/reg.h +++ b/drivers/net/wireless/ath/ath12k/reg.h @@ -83,6 +83,12 @@ struct ath12k_reg_info { [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; }; +/* Phy bitmaps */ +enum ath12k_reg_phy_bitmap { + ATH12K_REG_PHY_BITMAP_NO11AX = BIT(5), + ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6), +}; + void ath12k_reg_init(struct ath12k *ar); void ath12k_reg_free(struct ath12k_base *ab); void ath12k_regd_update_work(struct work_struct *work); diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h index bfa87cb8d0..c4058abc51 100644 --- a/drivers/net/wireless/ath/ath12k/rx_desc.h +++ b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -627,17 +627,18 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0) #define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1) -#define RX_MSDU_END_INFO5_TO_DS BIT(2) -#define RX_MSDU_END_INFO5_TID GENMASK(6, 3) #define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7) #define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8) #define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9) #define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10) #define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12) #define RX_MSDU_END_INFO5_LAST_MSDU BIT(13) -#define RX_MSDU_END_INFO5_FROM_DS BIT(14) #define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15) +#define RX_MSDU_END_QCN9274_INFO5_TO_DS BIT(2) +#define RX_MSDU_END_QCN9274_INFO5_TID GENMASK(6, 3) +#define RX_MSDU_END_QCN9274_INFO5_FROM_DS 
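ath12k_map_fw_phy_flags() above translates firmware phy-bitmap bits (NO11AX, NO11BE) into the NL80211 regulatory flags (NO_HE, NO_EHT) that get OR-ed into each built regulatory rule. A small sketch of that flag-translation shape; the input bit positions mirror the enum added in reg.h, while the output flag values are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define PHY_BITMAP_NO11AX   (1u << 5)   /* as in ath12k_reg_phy_bitmap */
#define PHY_BITMAP_NO11BE   (1u << 6)

#define RULE_FLAG_NO_HE     (1u << 0)   /* illustrative flag values */
#define RULE_FLAG_NO_EHT    (1u << 1)

static uint32_t map_phy_flags(uint32_t phy_bitmap)
{
    uint32_t flags = 0;

    if (phy_bitmap & PHY_BITMAP_NO11AX)
        flags |= RULE_FLAG_NO_HE;
    if (phy_bitmap & PHY_BITMAP_NO11BE)
        flags |= RULE_FLAG_NO_EHT;

    return flags;
}

int main(void)
{
    uint32_t fw_bitmap = PHY_BITMAP_NO11BE;

    printf("rule flags: 0x%x\n", map_phy_flags(fw_bitmap));
    return 0;
}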
BIT(14) + #define RX_MSDU_END_INFO6_MSDU_DROP BIT(0) #define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1) #define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6) @@ -650,14 +651,15 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0) #define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8) #define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9) -#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) -#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11) -#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12) -#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13) -#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14) -#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15) -#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0) +#define RX_MSDU_END_QCN9274_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) +#define RX_MSDU_END_QCN9274_INFO7_MSDU_LIMIT_ERROR BIT(11) +#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_TIMEOUT BIT(12) +#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_INVALID BIT(13) +#define RX_MSDU_END_QCN9274_INFO7_CCE_MATCH BIT(14) +#define RX_MSDU_END_QCN9274_INFO7_AMSDU_PARSER_ERR BIT(15) + +#define RX_MSDU_END_QCN9274_INFO8_KEY_ID GENMASK(7, 0) #define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6) #define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15) @@ -698,8 +700,9 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14) #define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18) #define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21) -#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) -#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31) + +#define RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) +#define RX_MSDU_END_QCN9274_INFO12_MIMO_DONE_COPY BIT(31) #define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0) #define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2) @@ -714,7 +717,6 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_EOSP BIT(11) #define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12) #define RX_MSDU_END_INFO13_ORDER BIT(14) -#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) #define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16) #define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17) #define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18) @@ -732,6 +734,8 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30) #define RX_MSDU_END_INFO13_FCS_ERR BIT(31) +#define RX_MSDU_END_QCN9274_INFO13_WIFI_PARSER_ERR BIT(15) + #define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10) #define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13) #define RX_MSDU_END_INFO14_MSDU_DONE BIT(31) @@ -782,6 +786,65 @@ struct rx_msdu_end_qcn9274 { __le32 info14; } __packed; +/* These macro definitions are only used for WCN7850 */ +#define RX_MSDU_END_WCN7850_INFO2_KEY_ID BIT(7, 0) + +#define RX_MSDU_END_WCN7850_INFO5_MSDU_LIMIT_ERR BIT(2) +#define RX_MSDU_END_WCN7850_INFO5_IDX_TIMEOUT BIT(3) +#define RX_MSDU_END_WCN7850_INFO5_IDX_INVALID BIT(4) +#define RX_MSDU_END_WCN7850_INFO5_WIFI_PARSE_ERR BIT(5) +#define RX_MSDU_END_WCN7850_INFO5_AMSDU_PARSER_ERR BIT(6) +#define RX_MSDU_END_WCN7850_INFO5_TCPUDP_CSUM_FAIL_CPY BIT(14) + +#define RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP GENMASK(31, 24) + +#define RX_MSDU_END_WCN7850_INFO13_FRAGMENT_FLAG BIT(13) +#define RX_MSDU_END_WCN7850_INFO13_CCE_MATCH BIT(15) + +struct rx_msdu_end_wcn7850 { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ip_hdr_cksum; + __le16 info1; + __le16 info2; + __le16 cumulative_l3_checksum; + __le32 rule_indication0; + __le32 rule_indication1; + __le16 info3; + __le16 l3_type; + __le32 ipv6_options_crc; + __le32 
tcp_seq_num; + __le32 tcp_ack_num; + __le16 info4; + __le16 window_size; + __le16 tcp_udp_chksum; + __le16 info5; + __le16 sa_idx; + __le16 da_idx_or_sw_peer_id; + __le32 info6; + __le32 fse_metadata; + __le16 cce_metadata; + __le16 sa_sw_peer_id; + __le16 info7; + __le16 rsvd0; + __le16 cumulative_l4_checksum; + __le16 cumulative_ip_length; + __le32 info9; + __le32 info10; + __le32 info11; + __le32 toeplitz_hash_2_or_4; + __le32 flow_id_toeplitz; + __le32 info12; + __le32 ppdu_start_timestamp_31_0; + __le32 ppdu_start_timestamp_63_32; + __le32 phy_meta_data; + __le16 vlan_ctag_ci; + __le16 vlan_stag_ci; + __le32 rsvd[3]; + __le32 info13; + __le32 info14; +} __packed; + /* rx_msdu_end * * rxpcu_mpdu_filter_in_category @@ -1410,7 +1473,7 @@ struct rx_pkt_hdr_tlv { struct hal_rx_desc_wcn7850 { __le64 msdu_end_tag; - struct rx_msdu_end_qcn9274 msdu_end; + struct rx_msdu_end_wcn7850 msdu_end; u8 rx_padding0[RX_BE_PADDING0_BYTES]; __le64 mpdu_start_tag; struct rx_mpdu_start_qcn9274 mpdu_start; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index d217b70a7a..0e5bf5ce8d 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -152,6 +152,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_service_available_event) }, [WMI_TAG_PEER_ASSOC_CONF_EVENT] = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, + [WMI_TAG_RFKILL_EVENT] = { + .min_len = sizeof(struct wmi_rfkill_state_change_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { @@ -406,22 +408,22 @@ err_pull: int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, u32 cmd_id) { - struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; might_sleep(); - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id); - if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags)) + if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); }), WMI_SEND_TIMEOUT_HZ); if (ret == -EAGAIN) - ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); return ret; } @@ -725,10 +727,10 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk return 0; } -struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len) +struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath12k_base *ab = wmi_sc->ab; + struct ath12k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -3469,7 +3471,7 @@ int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, int ath12k_wmi_cmd_init(struct ath12k_base *ab) { - struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; struct ath12k_wmi_init_cmd_arg arg = {}; if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, @@ -3478,9 +3480,9 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) ab->hw_params->wmi_init(ab, &arg.res_cfg); - arg.num_mem_chunks = wmi_sc->num_mem_chunks; - arg.hw_mode_id = wmi_sc->preferred_hw_mode; - arg.mem_chunks = wmi_sc->mem_chunks; + arg.num_mem_chunks = wmi_ab->num_mem_chunks; + arg.hw_mode_id = 
wmi_ab->preferred_hw_mode; + arg.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params->single_pdev_only) arg.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -3488,7 +3490,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); - return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg); + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, @@ -4159,14 +4161,22 @@ static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band, __le32 cap_info_internal) { struct ath12k_band_cap *cap_band = &pdev->cap.band[band]; + u32 support_320mhz; u8 i; + if (band == NL80211_BAND_6GHZ) + support_320mhz = cap_band->eht_cap_phy_info[0] & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++) cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]); for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++) cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]); + if (band == NL80211_BAND_6GHZ) + cap_band->eht_cap_phy_info[0] |= support_320mhz; + cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]); cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]); if (band != NL80211_BAND_2GHZ) { @@ -4188,10 +4198,19 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab, const struct ath12k_wmi_caps_ext_params *caps, struct ath12k_pdev *pdev) { - u32 bands; + struct ath12k_band_cap *cap_band; + u32 bands, support_320mhz; int i; if (ab->hw_params->single_pdev_only) { + if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) { + support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + cap_band = &pdev->cap.band[NL80211_BAND_6GHZ]; + cap_band->eht_cap_phy_info[0] |= support_320mhz; + return 0; + } + for (i = 0; i < ab->fw_pdev_count; i++) { struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i]; @@ -4247,7 +4266,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, return -EPROTO; if (ab->hw_params->single_pdev_only) { - if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id)) + if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) && + caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE) return 0; } else { for (i = 0; i < ab->num_radios; i++) { @@ -4591,10 +4611,11 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, } ath12k_dbg(ab, ATH12K_DBG_WMI, - "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", __func__, reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2g, reg_info->max_bw_2g, - reg_info->min_bw_5g, reg_info->max_bw_5g); + reg_info->min_bw_5g, reg_info->max_bw_5g, + reg_info->phybitmap); ath12k_dbg(ab, ATH12K_DBG_WMI, "num_2g_reg_rules %d num_5g_reg_rules %d", @@ -5401,7 +5422,13 @@ static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, static bool ath12k_reg_is_world_alpha(char *alpha) { - return alpha[0] == '0' && alpha[1] == '0'; + if (alpha[0] == '0' && alpha[1] == '0') + return true; + + if (alpha[0] == 'n' && alpha[1] == 'a') + return true; + + return false; } static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) @@ -5873,8 +5900,9 @@ exit: rcu_read_unlock(); } -static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, - u32 vdev_id) +static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, + u32 vdev_id, + enum ath12k_scan_state state) { int i; struct ath12k_pdev 
*pdev; @@ -5886,7 +5914,7 @@ static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, ar = pdev->ar; spin_lock_bh(&ar->data_lock); - if (ar->scan.state == ATH12K_SCAN_ABORTING && + if (ar->scan.state == state && ar->scan.vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; @@ -5916,10 +5944,15 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) * aborting scan's vdev id matches this event info. */ if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && - le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) - ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id)); - else + le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) { + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_ABORTING); + if (!ar) + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_RUNNING); + } else { ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); + } if (!ar) { ath12k_warn(ab, "Received scan event for unknown vdev"); @@ -6595,6 +6628,40 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, kfree(tb); } +static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_rfkill_state_change_event *ev; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_RFKILL_EVENT]; + if (!ev) { + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", + le32_to_cpu(ev->gpio_pin_num), + le32_to_cpu(ev->int_type), + le32_to_cpu(ev->radio_state)); + + spin_lock_bh(&ab->base_lock); + ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); + spin_unlock_bh(&ab->base_lock); + + queue_work(ab->workqueue, &ab->rfkill_work); + kfree(tb); +} + static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -6687,6 +6754,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath12k_probe_resp_tx_status_event(ab, skb); break; + case WMI_RFKILL_STATE_CHANGE_EVENTID: + ath12k_rfkill_state_change_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index c75a6fa1f7..629373d674 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -2158,6 +2158,9 @@ enum wmi_tlv_service { WMI_MAX_EXT_SERVICE = 256, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281, + + WMI_TLV_SERVICE_11BE = 289, + WMI_MAX_EXT2_SERVICE, }; @@ -4793,6 +4796,31 @@ struct ath12k_wmi_base { #define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024) +enum wmi_sys_cap_info_flags { + WMI_SYS_CAP_INFO_RXTX_LED = BIT(0), + WMI_SYS_CAP_INFO_RFKILL = BIT(1), +}; + +#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0) +#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6) +#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7) + +enum wmi_rfkill_enable_radio { + WMI_RFKILL_ENABLE_RADIO_ON = 0, + WMI_RFKILL_ENABLE_RADIO_OFF = 1, +}; + +enum wmi_rfkill_radio_state { + WMI_RFKILL_RADIO_STATE_OFF = 1, + WMI_RFKILL_RADIO_STATE_ON = 2, +}; + +struct wmi_rfkill_state_change_event { + __le32 gpio_pin_num; 
+ __le32 int_type; + __le32 radio_state; +} __packed; + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index c59c144831..9f534ed2fb 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -230,13 +230,13 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) } static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) { - struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + struct ath5k_hw *ah = hw_priv; return ath5k_hw_reg_read(ah, reg_offset); } static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) { - struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + struct ath5k_hw *ah = hw_priv; ath5k_hw_reg_write(ah, val, reg_offset); } @@ -1770,7 +1770,7 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, ah->stats.antenna_tx[0]++; /* invalid */ trace_ath5k_tx_complete(ah, skb, txq, ts); - ieee80211_tx_status(ah->hw, skb); + ieee80211_tx_status_skb(ah->hw, skb); } static void diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 33e9928af3..4390529847 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -131,8 +131,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led, int err; led->ah = ah; - strncpy(led->name, name, sizeof(led->name)); - led->name[sizeof(led->name)-1] = 0; + strscpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = ath5k_led_brightness_set; diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 86b8cb975b..b51fce5ae2 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); /* return bus cachesize in 4B word units */ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) { - struct ath5k_hw *ah = (struct ath5k_hw *) common->priv; + struct ath5k_hw *ah = common->priv; u8 u8tmp; pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp); @@ -76,7 +76,7 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) static bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) { - struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; + struct ath5k_hw *ah = common->ah; u32 status, timeout; /* diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 0c2b8b1a10..e37db4af33 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -1118,9 +1118,9 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, ath6kl_band_2ghz.ht_cap.ht_supported) ? 
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT); - mutex_lock(&vif->wdev.mtx); + wiphy_lock(vif->ar->wiphy); cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0); - mutex_unlock(&vif->wdev.mtx); + wiphy_unlock(vif->ar->wiphy); } static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, @@ -2954,7 +2954,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, } static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *beacon) + struct cfg80211_ap_update *params) { struct ath6kl_vif *vif = netdev_priv(dev); @@ -2964,7 +2964,7 @@ static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev, if (vif->next_mode != AP_NETWORK) return -EOPNOTSUPP; - return ath6kl_set_ies(vif, beacon); + return ath6kl_set_ies(vif, ¶ms->beacon); } static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 201e455540..15f455adb8 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1677,7 +1677,7 @@ static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len) /* add "..." to the end of string */ trunc_len = strlen(trunc) + 1; - strncpy(buf + buf_len - trunc_len, trunc, trunc_len); + memcpy(buf + buf_len - trunc_len, trunc, trunc_len); return; } diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index d3aa9e7a37..8f9fe23e97 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -852,14 +852,14 @@ void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) void ath6kl_wakeup_event(void *dev) { - struct ath6kl *ar = (struct ath6kl *) dev; + struct ath6kl *ar = dev; wake_up(&ar->event_wq); } void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) { - struct ath6kl *ar = (struct ath6kl *) devt; + struct ath6kl *ar = devt; ar->tx_pwr = tx_pwr; wake_up(&ar->event_wq); diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index a56fab6232..80e66acc5c 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -708,7 +708,7 @@ void ath6kl_tx_complete(struct htc_target *target, packet->endpoint >= ENDPOINT_MAX)) continue; - ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt; + ath6kl_cookie = packet->pkt_cntxt; if (WARN_ON_ONCE(!ath6kl_cookie)) continue; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index e150d82edd..0c47be06c1 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -57,8 +57,7 @@ config ATH9K_AHB config ATH9K_DEBUGFS bool "Atheros ath9k debugging" - depends on ATH9K && DEBUG_FS - select MAC80211_DEBUGFS + depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS select ATH9K_COMMON_DEBUG help Say Y, if you need access to ath9k's statistics for @@ -70,7 +69,6 @@ config ATH9K_DEBUGFS config ATH9K_STATION_STATISTICS bool "Detailed station statistics" depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS - select MAC80211_DEBUGFS default n help This option enables detailed statistics for association stations. 
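A note on the string-copy conversions above: the ath5k/led.c hunk replaces strncpy() plus a manual terminating write with strscpy(), and ath6kl/init.c switches a fixed, already-terminated copy to memcpy(), because strncpy() neither guarantees NUL termination nor adds anything when the length is known exactly. The following is a minimal userspace sketch of the difference; copy_bounded() is a simplified stand-in written for this example, not the kernel's strscpy() implementation, and the LED name string is invented.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's strscpy(): always NUL-terminates
 * the destination and returns -1 (instead of -E2BIG) on truncation. */
static long copy_bounded(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size == 0)
		return -1;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;              /* truncated */
	}
	memcpy(dst, src, len + 1);      /* copies the terminating NUL too */
	return (long)len;
}

int main(void)
{
	char led_name[8];
	const char *name = "ath5k-phy0::rx";   /* longer than the buffer */

	/* strncpy() fills the buffer without terminating it when the source
	 * is too long; the old ath5k code had to patch the last byte itself. */
	strncpy(led_name, name, sizeof(led_name));
	led_name[sizeof(led_name) - 1] = '\0';
	printf("strncpy + manual fix: %s\n", led_name);

	/* A strscpy()-style helper terminates unconditionally. */
	copy_bounded(led_name, name, sizeof(led_name));
	printf("bounded copy        : %s\n", led_name);
	return 0;
}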
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index a29c11f944..6274d16242 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -766,10 +766,10 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah, } } -static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, +static u32 ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, struct ath9k_channel *chan) { - int ret; + u32 ret; if (IS_CHAN_2GHZ(chan)) { if (IS_CHAN_HT40(chan)) @@ -791,7 +791,7 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, return ret; } -static int ar9561_hw_get_modes_txgain_index(struct ath_hw *ah, +static u32 ar9561_hw_get_modes_txgain_index(struct ath_hw *ah, struct ath9k_channel *chan) { if (IS_CHAN_2GHZ(chan)) { @@ -916,7 +916,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, * TXGAIN initvals. */ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) { - int modes_txgain_index = 1; + u32 modes_txgain_index = 1; if (AR_SREV_9550(ah)) modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan); @@ -925,9 +925,6 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, modes_txgain_index = ar9561_hw_get_modes_txgain_index(ah, chan); - if (modes_txgain_index < 0) - return -EINVAL; - REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index, regWrites); } else { diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index e5414435b1..90cfe39aa4 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -1481,31 +1481,31 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface) { struct hif_device_usb *hif_dev = usb_get_intfdata(interface); struct htc_target *htc_handle = hif_dev->htc_handle; - int ret; const struct firmware *fw; + int ret; ret = ath9k_hif_usb_alloc_urbs(hif_dev); if (ret) return ret; - if (hif_dev->flags & HIF_USB_READY) { - /* request cached firmware during suspend/resume cycle */ - ret = request_firmware(&fw, hif_dev->fw_name, - &hif_dev->udev->dev); - if (ret) - goto fail_resume; - - hif_dev->fw_data = fw->data; - hif_dev->fw_size = fw->size; - ret = ath9k_hif_usb_download_fw(hif_dev); - release_firmware(fw); - if (ret) - goto fail_resume; - } else { - ath9k_hif_usb_dealloc_urbs(hif_dev); - return -EIO; + if (!(hif_dev->flags & HIF_USB_READY)) { + ret = -EIO; + goto fail_resume; } + /* request cached firmware during suspend/resume cycle */ + ret = request_firmware(&fw, hif_dev->fw_name, + &hif_dev->udev->dev); + if (ret) + goto fail_resume; + + hif_dev->fw_data = fw->data; + hif_dev->fw_size = fw->size; + ret = ath9k_hif_usb_download_fw(hif_dev); + release_firmware(fw); + if (ret) + goto fail_resume; + mdelay(100); ret = ath9k_htc_resume(htc_handle); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 5985aa15ca..b3e66b0485 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -126,7 +126,7 @@ struct hif_device_usb { struct usb_anchor reg_in_submitted; struct usb_anchor mgmt_submitted; struct sk_buff *remain_skb; - char fw_name[32]; + char fw_name[64]; int fw_minor_index; int rx_remain_len; int rx_pkt_len; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 672789e3c5..efcaeccb05 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -523,7 +523,7 
@@ send_mac80211: } /* Send status to mac80211 */ - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv, @@ -652,9 +652,10 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event) struct ath9k_htc_tx_event *tx_pend; int i; - for (i = 0; i < txs->cnt; i++) { - WARN_ON(txs->cnt > HTC_MAX_TX_STATUS); + if (WARN_ON_ONCE(txs->cnt > HTC_MAX_TX_STATUS)) + return; + for (i = 0; i < txs->cnt; i++) { __txs = &txs->txstatus[i]; skb = ath9k_htc_tx_get_packet(priv, __txs); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 4e939dcac1..f15684379b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -94,7 +94,7 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_STATUS_EOSP)) { - ieee80211_tx_status(hw, skb); + ieee80211_tx_status_skb(hw, skb); return; } diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index e4eb666c6e..c4edf83559 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -178,7 +178,7 @@ static void carl9170_usb_tx_data_complete(struct urb *urb) switch (urb->status) { /* everything is fine */ case 0: - carl9170_tx_callback(ar, (void *)urb->context); + carl9170_tx_callback(ar, urb->context); break; /* disconnect */ @@ -369,7 +369,7 @@ void carl9170_usb_handle_tx_err(struct ar9170 *ar) struct urb *urb; while ((urb = usb_get_from_anchor(&ar->tx_err))) { - struct sk_buff *skb = (void *)urb->context; + struct sk_buff *skb = urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); @@ -397,7 +397,7 @@ static void carl9170_usb_tasklet(struct tasklet_struct *t) static void carl9170_usb_rx_complete(struct urb *urb) { - struct ar9170 *ar = (struct ar9170 *)urb->context; + struct ar9170 *ar = urb->context; int err; if (WARN_ON_ONCE(!ar)) @@ -559,7 +559,7 @@ static int carl9170_usb_flush(struct ar9170 *ar) int ret, err = 0; while ((urb = usb_get_from_anchor(&ar->tx_wait))) { - struct sk_buff *skb = (void *)urb->context; + struct sk_buff *skb = urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); usb_free_urb(urb); @@ -668,7 +668,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, memcpy(ar->cmd.data, payload, plen); spin_lock_bh(&ar->cmd_lock); - ar->readbuf = (u8 *)out; + ar->readbuf = out; ar->readlen = outlen; spin_unlock_bh(&ar->cmd_lock); diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 2788a1b06c..700da9f453 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -161,7 +161,7 @@ get_dfs_domain_radar_types(enum nl80211_dfs_regions region) struct channel_detector { struct list_head head; u16 freq; - struct pri_detector **detectors; + struct pri_detector *detectors[]; }; /* channel_detector_reset() - reset detector lines for a given channel */ @@ -183,14 +183,13 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd, if (cd == NULL) return; list_del(&cd->head); - if (cd->detectors) { - for (i = 0; i < dpd->num_radar_types; i++) { - struct pri_detector *de = cd->detectors[i]; - if (de != NULL) - de->exit(de); - } + + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_detector *de = cd->detectors[i]; + if (de != 
NULL) + de->exit(de); } - kfree(cd->detectors); + kfree(cd); } @@ -200,16 +199,12 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) u32 i; struct channel_detector *cd; - cd = kmalloc(sizeof(*cd), GFP_ATOMIC); + cd = kzalloc(struct_size(cd, detectors, dpd->num_radar_types), GFP_ATOMIC); if (cd == NULL) goto fail; INIT_LIST_HEAD(&cd->head); cd->freq = freq; - cd->detectors = kcalloc(dpd->num_radar_types, - sizeof(*cd->detectors), GFP_ATOMIC); - if (cd->detectors == NULL) - goto fail; for (i = 0; i < dpd->num_radar_types; i++) { const struct radar_detector_specs *rs = &dpd->radar_spec[i]; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 9013f056ee..d405a4c340 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -180,7 +180,7 @@ static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wc if (!wcn_ch->cpu_addr) return -ENOMEM; - cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr; + cur_dxe = wcn_ch->cpu_addr; cur_ctl = wcn_ch->head_blk_ctl; for (i = 0; i < wcn_ch->desc_num; i++) { @@ -453,7 +453,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) { - struct wcn36xx *wcn = (struct wcn36xx *)dev; + struct wcn36xx *wcn = dev; int int_src, int_reason; wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src); @@ -541,7 +541,7 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev) { - struct wcn36xx *wcn = (struct wcn36xx *)dev; + struct wcn36xx *wcn = dev; wcn36xx_dxe_rx_frame(wcn); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 17e1919d1c..2cf86fc3f8 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -576,7 +576,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len) if (len < sizeof(*rsp)) return -EIO; - rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf; + rsp = buf; if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status) return -EIO; @@ -1025,7 +1025,7 @@ static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) ret = wcn36xx_smd_rsp_status_check(buf, len); if (ret) return ret; - rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n", rsp->channel_number, rsp->status); return ret; @@ -1072,7 +1072,7 @@ static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len, if (ret) return ret; - rsp = (struct wcn36xx_hal_process_ptt_msg_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "process ptt msg responded with length %d\n", rsp->header.len); @@ -1131,7 +1131,7 @@ static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) { struct wcn36xx_hal_update_scan_params_resp *rsp; - rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf; + rsp = buf; /* Remove the PNO version bit */ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK)); @@ -1198,7 +1198,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf; + rsp = buf; if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { wcn36xx_warn("hal add sta self failure: %d\n", @@ -1316,7 +1316,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len) if (wcn36xx_smd_rsp_status_check(buf, len)) return -EIO; - 
rsp = (struct wcn36xx_hal_join_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "hal rsp join status %d tx_mgmt_power %d\n", @@ -1481,7 +1481,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf; + rsp = buf; params = &rsp->params; if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { @@ -1849,7 +1849,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf; + rsp = buf; params = &rsp->bss_rsp_params; if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { @@ -2476,7 +2476,7 @@ static int wcn36xx_smd_add_ba_session_rsp(void *buf, int len, u8 *session) if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_add_ba_session_rsp_msg *)buf; + rsp = buf; if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) return rsp->status; @@ -2654,7 +2654,7 @@ static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf; + rsp = buf; if (rsp->candidate_cnt < 1) return rsp->status ? rsp->status : -EINVAL; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index cf15cde2a3..2c1ed9e570 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -47,7 +47,7 @@ struct wcn36xx_fw_msg_status_rsp { struct wcn36xx_hal_ind_msg { struct list_head list; size_t msg_len; - u8 msg[]; + u8 msg[] __counted_by(msg_len); }; struct wcn36xx; diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c index 7ae14b4d2d..e5142c0529 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.c +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -53,7 +53,7 @@ static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif, buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]); buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]); - msg = (struct ftm_rsp_msg *)buf; + msg = buf; wcn36xx_dbg(WCN36XX_DBG_TESTMODE, "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n", diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 40f9a7ef89..dbe4b3478f 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -2082,11 +2082,12 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil) static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_beacon_data *bcon) + struct cfg80211_ap_update *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct wil6210_vif *vif = ndev_to_vif(ndev); + struct cfg80211_beacon_data *bcon = ¶ms->beacon; int rc; u32 privacy = 0; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6a5976a294..6fdb77d4c5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -870,7 +870,6 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) struct cfg80211_bss *bss; struct cfg80211_inform_bss bss_data = { .chan = channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, .boottime_ns = ktime_to_ns(ktime_get_boottime()), }; @@ -1389,7 +1388,6 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) u32 d_len; struct cfg80211_bss 
*bss; struct cfg80211_inform_bss bss_data = { - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .boottime_ns = ktime_to_ns(ktime_get_boottime()), }; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 7c2d1c5881..461dce21de 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -571,7 +571,6 @@ static const struct { { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} }; static void build_wpa_mib(struct atmel_private *priv); -static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void atmel_copy_to_card(struct net_device *dev, u16 dest, const unsigned char *src, u16 len); static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, @@ -1487,7 +1486,6 @@ static const struct net_device_ops atmel_netdev_ops = { .ndo_stop = atmel_close, .ndo_set_mac_address = atmel_set_mac_address, .ndo_start_xmit = start_tx, - .ndo_do_ioctl = atmel_ioctl, .ndo_validate_addr = eth_validate_addr, }; @@ -2616,76 +2614,6 @@ static const struct iw_handler_def atmel_handler_def = { .get_wireless_stats = atmel_get_wireless_stats }; -static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - int i, rc = 0; - struct atmel_private *priv = netdev_priv(dev); - struct atmel_priv_ioctl com; - struct iwreq *wrq = (struct iwreq *) rq; - unsigned char *new_firmware; - char domain[REGDOMAINSZ + 1]; - - switch (cmd) { - case ATMELIDIFC: - wrq->u.param.value = ATMELMAGIC; - break; - - case ATMELFWL: - if (copy_from_user(&com, rq->ifr_data, sizeof(com))) { - rc = -EFAULT; - break; - } - - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - - new_firmware = memdup_user(com.data, com.len); - if (IS_ERR(new_firmware)) { - rc = PTR_ERR(new_firmware); - break; - } - - kfree(priv->firmware); - - priv->firmware = new_firmware; - priv->firmware_length = com.len; - strncpy(priv->firmware_id, com.id, 31); - priv->firmware_id[31] = '\0'; - break; - - case ATMELRD: - if (copy_from_user(domain, rq->ifr_data, REGDOMAINSZ)) { - rc = -EFAULT; - break; - } - - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - - domain[REGDOMAINSZ] = 0; - rc = -EINVAL; - for (i = 0; i < ARRAY_SIZE(channel_table); i++) { - if (!strcasecmp(channel_table[i].name, domain)) { - priv->config_reg_domain = channel_table[i].reg_domain; - rc = 0; - } - } - - if (rc == 0 && priv->station_state != STATION_STATE_DOWN) - rc = atmel_open(dev); - break; - - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - struct auth_body { __le16 alg; __le16 trans_seq; diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 9a7c62bd5e..760d1a28ed 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1531,9 +1531,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, ring->nr_failed_tx_packets++; ring->nr_total_packet_tries += status->frame_count; #endif /* DEBUG */ - ieee80211_tx_status(dev->wl->hw, meta->skb); + ieee80211_tx_status_skb(dev->wl->hw, meta->skb); - /* skb will be freed by ieee80211_tx_status(). + /* skb will be freed by ieee80211_tx_status_skb(). * Poison our pointer. 
*/ meta->skb = B43_DMA_PTR_POISON; } else { diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c index 8c28a9250c..0cf70fdb60 100644 --- a/drivers/net/wireless/broadcom/b43/pio.c +++ b/drivers/net/wireless/broadcom/b43/pio.c @@ -582,7 +582,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, q->buffer_used -= total_len; q->free_packet_slots += 1; - ieee80211_tx_status(dev->wl->hw, pack->skb); + ieee80211_tx_status_skb(dev->wl->hw, pack->skb); pack->skb = NULL; list_add(&pack->list, &q->packets_list); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 2a90bb24ba..44cea18dd2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3367,7 +3367,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, freq = ieee80211_channel_to_frequency(channel, band); bss_data.chan = ieee80211_get_channel(wiphy, freq); - bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); @@ -3780,8 +3779,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req, if (req->channels[i] == chan) break; } - if (i == req->n_channels) - req->channels[req->n_channels++] = chan; + if (i == req->n_channels) { + req->n_channels++; + req->channels[i] = chan; + } for (i = 0; i < req->n_ssids; i++) { if (req->ssids[i].ssid_len == ssid_len && @@ -5416,13 +5417,13 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev, static s32 brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_beacon_data *info) + struct cfg80211_ap_update *info) { struct brcmf_if *ifp = netdev_priv(ndev); brcmf_dbg(TRACE, "Enter\n"); - return brcmf_config_ap_mgmt_ie(ifp->vif, info); + return brcmf_config_ap_mgmt_ie(ifp->vif, &info->beacon); } static int diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 09d2f2dc2b..83f8ed7d00 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -19,7 +19,7 @@ #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ -#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ +#define BRCMF_FW_NVRAM_PCIEDEV_LEN 20 /* pcie/1/4/ + \0 */ #define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff" #define BRCMF_FW_MACADDR_FMT "macaddr=%pM" #define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3) @@ -238,9 +238,9 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { /* Device path with a leading '=' key-value separator */ - char pci_path[] = "=pci/?/?"; + char pci_path[20]; size_t pci_len; - char pcie_path[] = "=pcie/?/?"; + char pcie_path[20]; size_t pcie_len; u32 i, j; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 1266cbaee0..4002d326fd 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -69,7 +69,7 @@ struct brcmf_fw_request { u16 bus_nr; u32 n_items; const char *board_types[BRCMF_FW_MAX_BOARD_TYPES]; - struct brcmf_fw_item items[]; + struct brcmf_fw_item items[] __counted_by(n_items); }; 
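The brcmfmac hunks around this point annotate flexible array members with __counted_by() (items[] in firmware.h here, data[] in fweh.c and bucket[] in fwil_types.h just below) and allocate them with struct_size(); the fweh.c change below also sets event->datalen before the memcpy() into event->data, since a bounds-checking build sizes the array from that counter. Below is a minimal userspace sketch of the same pattern, assuming a compiler that understands the counted_by attribute (it degrades to a no-op otherwise); the structure and field names are invented for the example, loosely modelled on brcmf_fweh_queue_item.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* counted_by needs a recent GCC/Clang; fall back to a no-op elsewhere. */
#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define COUNTED_BY(member) __attribute__((counted_by(member)))
# endif
#endif
#ifndef COUNTED_BY
# define COUNTED_BY(member)
#endif

/* Illustrative event carrying a variable-length payload. */
struct demo_event {
	unsigned int datalen;
	unsigned char data[] COUNTED_BY(datalen);
};

int main(void)
{
	const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t n = sizeof(payload);

	/* struct_size()-style allocation: header plus n payload bytes. */
	struct demo_event *ev = malloc(sizeof(*ev) + n);
	if (!ev)
		return 1;

	/* Set the counter before writing the array, so instrumented builds
	 * know how many elements data[] may legally hold. */
	ev->datalen = (unsigned int)n;
	memcpy(ev->data, payload, n);

	printf("event with %u byte(s), first byte 0x%02x\n",
	       ev->datalen, ev->data[0]);
	free(ev);
	return 0;
}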
struct brcmf_fw_name { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index dac7eb7779..68960ae989 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -33,7 +33,7 @@ struct brcmf_fweh_queue_item { u8 ifaddr[ETH_ALEN]; struct brcmf_event_msg_be emsg; u32 datalen; - u8 data[]; + u8 data[] __counted_by(datalen); }; /* @@ -418,17 +418,17 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, datalen + sizeof(*event_packet) > packet_len) return; - event = kzalloc(sizeof(*event) + datalen, gfp); + event = kzalloc(struct_size(event, data, datalen), gfp); if (!event) return; + event->datalen = datalen; event->code = code; event->ifidx = event_packet->msg.ifidx; /* use memcpy to get aligned event message */ memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); memcpy(event->data, data, datalen); - event->datalen = datalen; memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); brcmf_fweh_queue_event(fweh, event); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 611d1a6aab..9d248ba1c0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -1214,7 +1214,7 @@ struct brcmf_gscan_config { u8 count_of_channel_buckets; u8 retry_threshold; __le16 lost_ap_window; - struct brcmf_gscan_bucket_config bucket[]; + struct brcmf_gscan_bucket_config bucket[] __counted_by(count_of_channel_buckets); }; /** diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 0812db8936..b6636002c7 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -317,8 +317,6 @@ static int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max); -static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, - size_t max); static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_ucode_download(struct ipw2100_priv *priv, @@ -5894,17 +5892,14 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct ipw2100_priv *priv = libipw_priv(dev); - char fw_ver[64], ucode_ver[64]; + char fw_ver[64]; strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); - ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); - - snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", - fw_ver, priv->eeprom_version, ucode_ver); + strscpy(info->fw_version, fw_ver, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(priv->pci_dev), sizeof(info->bus_info)); } @@ -8406,17 +8401,6 @@ static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, return tmp; } -static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, - size_t max) -{ - u32 ver; - u32 len = sizeof(ver); - /* microcode version is a 32 bit integer */ - if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION, &ver, &len)) - return -EIO; - return snprintf(buf, max, "%08X", ver); -} - /* * On exit, the firmware will have been freed from the fw list */ diff --git 
a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 820100cac4..eed9ef17bc 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -9656,31 +9656,30 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, mutex_lock(&priv->mutex); switch (priv->ieee->mode) { case IEEE_A: - strncpy(extra, "802.11a (1)", MAX_WX_STRING); + strscpy_pad(extra, "802.11a (1)", MAX_WX_STRING); break; case IEEE_B: - strncpy(extra, "802.11b (2)", MAX_WX_STRING); + strscpy_pad(extra, "802.11b (2)", MAX_WX_STRING); break; case IEEE_A | IEEE_B: - strncpy(extra, "802.11ab (3)", MAX_WX_STRING); + strscpy_pad(extra, "802.11ab (3)", MAX_WX_STRING); break; case IEEE_G: - strncpy(extra, "802.11g (4)", MAX_WX_STRING); + strscpy_pad(extra, "802.11g (4)", MAX_WX_STRING); break; case IEEE_A | IEEE_G: - strncpy(extra, "802.11ag (5)", MAX_WX_STRING); + strscpy_pad(extra, "802.11ag (5)", MAX_WX_STRING); break; case IEEE_B | IEEE_G: - strncpy(extra, "802.11bg (6)", MAX_WX_STRING); + strscpy_pad(extra, "802.11bg (6)", MAX_WX_STRING); break; case IEEE_A | IEEE_B | IEEE_G: - strncpy(extra, "802.11abg (7)", MAX_WX_STRING); + strscpy_pad(extra, "802.11abg (7)", MAX_WX_STRING); break; default: - strncpy(extra, "unknown", MAX_WX_STRING); + strscpy_pad(extra, "unknown", MAX_WX_STRING); break; } - extra[MAX_WX_STRING - 1] = '\0'; IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); @@ -10378,7 +10377,6 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, { struct ipw_priv *p = libipw_priv(dev); char vers[64]; - char date[32]; u32 len; strscpy(info->driver, DRV_NAME, sizeof(info->driver)); @@ -10386,11 +10384,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, len = sizeof(vers); ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); - len = sizeof(date); - ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); - snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", - vers, date); + strscpy(info->fw_version, vers, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(p->pci_dev), sizeof(info->bus_info)); } diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index bec7bc2737..9065ca5b02 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h @@ -488,7 +488,7 @@ struct libipw_txb { u8 reserved; u16 frag_size; u16 payload_size; - struct sk_buff *fragments[]; + struct sk_buff *fragments[] __counted_by(nr_frags); }; /* SWEEP TABLE ENTRIES NUMBER */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 0a4aa3c678..69276266ce 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6122,7 +6122,7 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (il->ops->set_channel_switch(il, ch_switch)) { clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status); il->switch_channel = 0; - ieee80211_chswitch_done(il->vif, false); + ieee80211_chswitch_done(il->vif, false, 0); } out: diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 96002121bb..054fef680a 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4090,7 +4090,7 @@ il_chswitch_done(struct il_priv *il, bool is_success) return; if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) - 
ieee80211_chswitch_done(il->vif, is_success); + ieee80211_chswitch_done(il->vif, is_success, 0); } EXPORT_SYMBOL(il_chswitch_done); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index 8d5f9dce71..134635c70c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_AX210_UCODE_API_MAX 83 +#define IWL_AX210_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_AX210_UCODE_API_MIN 59 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index 42e765fe3c..82da957adc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 83 +#define IWL_BZ_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 80 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 604e9cef6b..80eb9b4995 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 83 +#define IWL_SC_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_SC_UCODE_API_MIN 82 diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 75a4b8e262..04864d3fda 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2023 Intel Corporation */ /* * Please use this file (commands.h) only for uCode API definitions. @@ -270,7 +270,7 @@ enum { #define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 #define IWL_PWR_CCK_ENTRIES 2 -/** +/* * struct tx_power_dual_stream * * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH @@ -281,7 +281,7 @@ struct tx_power_dual_stream { __le32 dw; } __packed; -/** +/* * Command REPLY_TX_POWER_DBM_CMD = 0x98 * struct iwlagn_tx_power_dbm_cmd */ @@ -295,7 +295,7 @@ struct iwlagn_tx_power_dbm_cmd { u8 reserved; } __packed; -/** +/* * Command TX_ANT_CONFIGURATION_CMD = 0x98 * This command is used to configure valid Tx antenna. * By default uCode concludes the valid antenna according to the radio flavor. @@ -313,7 +313,7 @@ struct iwl_tx_ant_config_cmd { #define UCODE_VALID_OK cpu_to_le32(0x1) -/** +/* * REPLY_ALIVE = 0x1 (response only, not a command) * * uCode issues this "alive" notification once the runtime image is ready @@ -534,7 +534,7 @@ enum { /* transfer to host non bssid beacons in associated state */ #define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) -/** +/* * REPLY_RXON = 0x10 (command, has simple generic response) * * RXON tunes the radio tuner to a service channel, and sets up a number @@ -681,6 +681,7 @@ struct iwl_csa_notification { * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. 
+ * @reserved1: reserved for alignment * * Device will automatically increase contention window by (2*CW) + 1 for each * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW @@ -791,9 +792,11 @@ struct iwl_keyinfo { /** * struct sta_id_modify - * @addr[ETH_ALEN]: station's MAC address + * @addr: station's MAC address + * @reserved1: reserved for alignment * @sta_id: index of station in uCode's station table * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * @reserved2: reserved for alignment * * Driver selects unused table index when adding new station, * or the index to a pre-existing station entry when modifying that station. @@ -1464,7 +1467,7 @@ struct iwl_compressed_ba_resp { #define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) -/** +/* * struct iwl_link_qual_general_params * * Used in REPLY_TX_LINK_QUALITY_CMD @@ -1507,7 +1510,7 @@ struct iwl_link_qual_general_params { #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) -/** +/* * struct iwl_link_qual_agg_params * * Used in REPLY_TX_LINK_QUALITY_CMD @@ -2040,7 +2043,7 @@ struct iwl_spectrum_notification { * *****************************************************************************/ -/** +/* * struct iwl_powertable_cmd - Power Table Command * @flags: See below: * @@ -2171,7 +2174,7 @@ struct iwl_ct_kill_throttling_config { #define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) #define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) -/** +/* * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table * * One for each channel in the scan list. @@ -2210,7 +2213,7 @@ struct iwl_scan_channel { /* set number of direct probes __le32 type */ #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) -/** +/* * struct iwl_ssid_ie - directed scan network information element * * Up to 20 of these may appear in REPLY_SCAN_CMD, @@ -2560,6 +2563,7 @@ struct statistics_rx_bt { * @ant_a: current tx power on chain a in 1/2 dB step * @ant_b: current tx power on chain b in 1/2 dB step * @ant_c: current tx power on chain c in 1/2 dB step + * @reserved: reserved for alignment */ struct statistics_tx_power { u8 ant_a; @@ -3006,7 +3010,7 @@ struct iwl_enhance_sensitivity_cmd { } __packed; -/** +/* * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) * * This command sets the relative gains of agn device's 3 radio receiver chains. @@ -3847,6 +3851,7 @@ struct iwlagn_wowlan_status { * @type: * 0 - BSS * 1 - PAN + * @reserved: reserved for alignment */ struct iwl_wipan_slot { __le16 width; @@ -3874,6 +3879,8 @@ struct iwl_wipan_slot { * uCode will perform leaving channel methods in context switch * also when working in same channel mode * @num_slots: 1 - 10 + * @slots: per-slot data + * @reserved: reserved for alignment */ struct iwl_wipan_params_cmd { __le16 flags; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 1a9eadace1..25283e4b84 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2020, 2023 Intel Corporation. All rights reserved. *****************************************************************************/ /* * Please use this file (dev.h) for driver implementation definitions. 
@@ -126,11 +126,11 @@ enum iwl_agg_state { /** * struct iwl_ht_agg - aggregation state machine - + * * This structs holds the states for the BA agreement establishment and tear * down. It also holds the state during the BA session itself. This struct is * duplicated for each RA / TID. - + * * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (REPLY_TX), and the block ack notification * (REPLY_COMPRESSED_BA). @@ -152,9 +152,9 @@ struct iwl_ht_agg { /** * struct iwl_tid_data - one for each RA / TID - + * * This structs holds the states for each RA / TID. - + * * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). @@ -195,7 +195,7 @@ struct iwl_station_priv { u8 sta_id; }; -/** +/* * struct iwl_vif_priv - driver's private per-interface information * * When mac80211 allocates a virtual interface, it can allocate @@ -529,6 +529,7 @@ enum iwl_scan_type { * relevant for 1000, 6000 and up * @struct iwl_sensitivity_ranges: range of sensitivity values * @use_rts_for_aggregation: use rts/cts protection for HT traffic + * @sens: sensitivity ranges pointer */ struct iwl_hw_params { u8 tx_chains_num; @@ -547,6 +548,7 @@ struct iwl_hw_params { * @bt_prio_boost: default bt priority boost value * @agg_time_limit: maximum number of uSec in aggregation * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode + * @bt_session_2: indicates version 2 of the BT command is used */ struct iwl_dvm_bt_params { bool advanced_bt_coexist; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index b1939ff275..5f3d5b15f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2018 - 2019, 2022 Intel Corporation + * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
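Several of the iwlwifi dvm documentation hunks in this region follow one kernel-doc rule: a comment opened with the double-star marker is parsed by scripts/kernel-doc and must describe every struct member, so blocks that are really free-form notes are demoted to ordinary /* comments, while genuine kernel-doc blocks gain the missing @reserved, @slots, @sens and similar member lines. A small compilable sketch of the resulting convention, with an invented structure loosely modelled on iwl_wipan_slot:

#include <stdint.h>
#include <stdio.h>

/* Plain comment: free-form notes, ignored by kernel-doc. */

/**
 * struct demo_slot - one scheduling slot (illustrative only)
 * @width: slot width in microseconds
 * @type: 0 - BSS, 1 - PAN
 * @reserved: reserved for alignment, must be zero
 *
 * Every member, including padding such as @reserved, gets its own line,
 * which is exactly what the hunks above add to the driver structures.
 */
struct demo_slot {
	uint16_t width;
	uint8_t type;
	uint8_t reserved;
};

int main(void)
{
	struct demo_slot slot = { .width = 100, .type = 0, .reserved = 0 };

	printf("slot: width=%u type=%u\n",
	       (unsigned)slot.width, (unsigned)slot.type);
	return 0;
}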
@@ -1001,7 +1001,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, if (priv->lib->set_channel_switch(priv, ch_switch)) { clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); + ieee80211_chswitch_done(ctx->vif, false, 0); } out: @@ -1024,7 +1024,7 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) return; if (ctx->vif) - ieee80211_chswitch_done(ctx->vif, is_success); + ieee80211_chswitch_done(ctx->vif, is_success, 0); } static void iwlagn_configure_filter(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index a873be109f..8774dd7b92 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1464,7 +1464,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, snprintf(priv->hw->wiphy->fw_version, sizeof(priv->hw->wiphy->fw_version), - "%s", fw->fw_version); + "%.31s", fw->fw_version); priv->new_scan_threshold_behaviour = !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index 0b47f1993c..100cb932c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2023 Intel Corporation. All rights reserved. *****************************************************************************/ #ifndef __iwl_agn_rs_h__ @@ -269,7 +269,7 @@ struct iwl_rate_mcs_info { char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; }; -/** +/* * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { @@ -281,7 +281,7 @@ struct iwl_rate_scale_data { unsigned long stamp; }; -/** +/* * struct iwl_scale_tbl_info -- tx params and success history for all rates * * There are two of these in struct iwl_lq_sta, @@ -311,7 +311,7 @@ struct iwl_traffic_load { u8 head; /* start of the circular buffer */ }; -/** +/* * struct iwl_lq_sta -- driver's rate scaling private structure * * Pointer to this gets passed back and forth between driver and mac80211. @@ -379,7 +379,7 @@ static inline u8 first_antenna(u8 mask) void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id); -/** +/* * iwl_rate_control_register - Register the rate control algorithm callbacks * * Since the rate control algorithm is hardware specific, there is no need @@ -391,7 +391,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, */ int iwlagn_rate_control_register(void); -/** +/* * iwl_rate_control_unregister - Unregister the rate control callbacks * * This should be called after calling ieee80211_unregister_hw, but before diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 7ace052fc7..23dfcda0dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved. 
* * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -72,14 +72,15 @@ struct iwl_tt_trans { * when thermal throttling state != IWL_TI_0 * the tt_power_mode should set to different * power mode based on the current tt state - * @tt_previous_temperature: last measured temperature - * @iwl_tt_restriction: ptr to restriction tbl, used by advance + * @tt_previous_temp: last measured temperature + * @restriction: ptr to restriction tbl, used by advance * thermal throttling to determine how many tx/rx streams * should be used in tt state; and can HT be enabled or not - * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling + * @transaction: ptr to adv trans table, used by advance thermal throttling * state transaction * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature * @ct_kill_exit_tm: timer to exit thermal kill + * @ct_kill_waiting_tm: timer to enter thermal kill */ struct iwl_tt_mgmt { enum iwl_tt_state state; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index ca1daec641..111ed18730 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -1248,7 +1248,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } } @@ -1385,6 +1385,6 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index b26f90e522..dcc4810cb3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -618,7 +618,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -634,7 +634,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -650,7 +650,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -707,7 +707,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -723,7 +723,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -739,7 +739,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { - ret = PTR_ERR(wifi_pkg); + ret = -EINVAL; goto out_free; } @@ -1011,18 +1011,29 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u8 value; + u32 val; __le32 config_bitmap = 0; /* - ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' + * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'. + * Setting config_bitmap Indonesia bit is valid only for HR/JF. 
*/ - ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2, - &iwl_guid, &value); - - if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_HR1: + case IWL_CFG_RF_TYPE_HR2: + case IWL_CFG_RF_TYPE_JF1: + case IWL_CFG_RF_TYPE_JF2: + ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, + DSM_FUNC_ENABLE_INDONESIA_5G2, + &iwl_guid, &value); + + if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + break; + default: + break; + } /* ** Evaluate func 'DSM_FUNC_DISABLE_SRD' @@ -1039,6 +1050,23 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); } + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) { + /* + ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG' + */ + ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0, + DSM_FUNC_REGULATORY_CONFIG, + &iwl_guid, &val); + /* + * China 2022 enable if the BIOS object does not exist or + * if it is enabled in BIOS. + */ + if (ret < 0 || val & DSM_MASK_CHINA_22_REG) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK); + } + return config_bitmap; } IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); @@ -1088,6 +1116,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + read_table: fwrt->ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index c36c62d641..e9277f6f35 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -134,10 +134,12 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, DSM_FUNC_ENABLE_6E = 3, + DSM_FUNC_REGULATORY_CONFIG = 4, DSM_FUNC_11AX_ENABLEMENT = 6, DSM_FUNC_ENABLE_UNII4_CHAN = 7, DSM_FUNC_ACTIVATE_CHANNEL = 8, - DSM_FUNC_FORCE_DISABLE_CHANNELS = 9 + DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, }; enum iwl_dsm_values_srd { @@ -164,6 +166,10 @@ enum iwl_dsm_values_rfi { DSM_VALUE_RFI_MAX }; +enum iwl_dsm_masks_reg { + DSM_MASK_CHINA_22_REG = BIT(2) +}; + #ifdef CONFIG_ACPI struct iwl_fw_runtime; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 13cb0d53a1..7544c4cb1a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -30,6 +30,8 @@ * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from * &enum iwl_regulatory_and_nvm_subcmd_ids * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds + * @STATISTICS_GROUP: Statistics group, uses command IDs from + * &enum iwl_statistics_subcmd_ids */ enum iwl_mvm_command_groups { LEGACY_GROUP = 0x0, @@ -44,6 +46,7 @@ enum iwl_mvm_command_groups { PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, + STATISTICS_GROUP = 0x10, }; /** @@ -616,10 +619,37 @@ enum iwl_system_subcmd_ids { */ SYSTEM_FEATURES_CONTROL_CMD = 0xd, + /** + * @SYSTEM_STATISTICS_CMD: &struct iwl_system_statistics_cmd + */ + SYSTEM_STATISTICS_CMD = 0xf, + + /** + * @SYSTEM_STATISTICS_END_NOTIF: &struct iwl_system_statistics_end_notif + */ + SYSTEM_STATISTICS_END_NOTIF = 0xfd, + /** * 
@RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif */ RFI_DEACTIVATE_NOTIF = 0xff, }; +/** + * enum iwl_statistics_subcmd_ids - Statistics group command IDs + */ +enum iwl_statistics_subcmd_ids { + /** + * @STATISTICS_OPER_NOTIF: Notification about operational + * statistics &struct iwl_system_statistics_notif_oper + */ + STATISTICS_OPER_NOTIF = 0x0, + + /** + * @STATISTICS_OPER_PART1_NOTIF: Notification about operational part1 + * statistics &struct iwl_system_statistics_part1_notif_oper + */ + STATISTICS_OPER_PART1_NOTIF = 0x1, +}; + #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 72d461c473..ea99d41040 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -397,6 +397,8 @@ struct iwl_wowlan_config_cmd { #define WOWLAN_GTK_KEYS_NUM 2 #define WOWLAN_IGTK_KEYS_NUM 2 #define WOWLAN_IGTK_MIN_INDEX 4 +#define WOWLAN_BIGTK_KEYS_NUM 2 +#define WOWLAN_BIGTK_MIN_INDEX 6 /* * WOWLAN_TSC_RSC_PARAMS @@ -621,9 +623,10 @@ struct iwl_wowlan_gtk_status_v3 { * @ipn: the IGTK packet number (replay counter) * @key_len: IGTK length, if set to 0, the key is not available * @key_flags: information about the key: - * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) - * bits[1:5]: IGTK index of the key in the internal DB - * bit[6]: Set iff this is the currently used IGTK + * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) + * (0: index 6, 1: index 7 with bigtk) + * bits[1:5]: IGTK index of the key in the internal DB + * bit[6]: Set iff this is the currently used IGTK */ struct iwl_wowlan_igtk_status { u8 key[WOWLAN_KEY_MAX_SIZE]; @@ -807,10 +810,44 @@ struct iwl_wowlan_info_notif_v1 { u8 reserved2[2]; } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */ +/** + * struct iwl_wowlan_info_notif_v2 - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif_v2 { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ + /** * struct iwl_wowlan_info_notif - WoWLAN information notification * @gtk: GTK data * @igtk: IGTK data + * @bigtk: BIGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched patterns * @reserved1: reserved @@ -827,6 +864,7 @@ struct iwl_wowlan_info_notif_v1 { struct iwl_wowlan_info_notif { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 reserved1; @@ 
-838,7 +876,7 @@ struct iwl_wowlan_info_notif { u8 tid_tear_down; u8 station_id; u8 reserved2[2]; -} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */ /** * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 39bee9c00e..394747deb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ @@ -42,6 +42,30 @@ struct iwl_fw_ini_header { /* followed by the data */ } __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */ +/** + * struct iwl_fw_ini_addr_size - Base address and size that defines + * a chunk of memory + * + * @addr: the base address (fixed size - 4 bytes) + * @size: the size to read + */ +struct iwl_fw_ini_addr_size { + __le32 addr; + __le32 size; +} __packed; /* FW_TLV_DEBUG_ADDR_SIZE_VER_1 */ + +/** + * struct iwl_fw_ini_region_dev_addr_range - Configuration to read + * device address range + * + * @offset: offset to add to the base address of each chunk + * The addrs[] array will be treated as an array of &iwl_fw_ini_addr_size - + * an array of (addr, size) pairs. + */ +struct iwl_fw_ini_region_dev_addr_range { + __le32 offset; +} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_RANGE_API_S_VER_1 */ + /** * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses * @@ -135,6 +159,10 @@ struct iwl_fw_ini_region_internal_buffer { * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, + * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, + * @dev_addr_range: device address range configuration. Used by + * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and + * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and * &IWL_FW_INI_REGION_RXF * @err_table: error table configuration. 
Used by @@ -157,6 +185,7 @@ struct iwl_fw_ini_region_tlv { u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_dev_addr dev_addr; + struct iwl_fw_ini_region_dev_addr_range dev_addr_range; struct iwl_fw_ini_region_fifos fifos; struct iwl_fw_ini_region_err_table err_table; struct iwl_fw_ini_region_internal_buffer internal_buffer; @@ -362,6 +391,9 @@ enum iwl_fw_ini_buffer_location { * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM + * @IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE: a range of periphery registers of MAC + * @IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE: a range of periphery registers of PHY + * @IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP: periphery registers of SNPS DPHYIP * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -384,6 +416,9 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_PCI_IOSF_CONFIG, IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, IWL_FW_INI_REGION_DBGI_SRAM, + IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE, + IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE, + IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 90ce8d9b6a..8248c3beb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -522,4 +522,26 @@ enum iwl_mvm_tas_statically_disabled_reason { TAS_DISABLED_REASON_MAX, }; /*_TAS_STATICALLY_DISABLED_REASON_E*/ +/** + * enum iwl_fw_dbg_config_cmd_type - types of FW debug config command + * @DEBUG_TOKEN_CONFIG_TYPE: token config type + */ +enum iwl_fw_dbg_config_cmd_type { + DEBUG_TOKEN_CONFIG_TYPE = 0x2B, +}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */ + +/* this token disables debug asserts in the firmware */ +#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001 + +/** + * struct iwl_fw_dbg_config_cmd - configure FW debug + * + * @type: according to &enum iwl_fw_dbg_config_cmd_type + * @conf: FW configuration + */ +struct iwl_fw_dbg_config_cmd { + __le32 type; + __le32 conf; +} __packed; /* LDBG_CFG_CMD_API_S_VER_7 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 184db5a6f0..f15e6d64c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -57,6 +57,14 @@ enum iwl_mac_conf_subcmd_ids { * @STA_DISABLE_TX_CMD: &struct iwl_mvm_sta_disable_tx_cmd */ STA_DISABLE_TX_CMD = 0xD, + /** + * @ROC_CMD: &struct iwl_roc_req + */ + ROC_CMD = 0xE, + /** + * @ROC_NOTIF: &struct iwl_roc_notif + */ + ROC_NOTIF = 0xF8, /** * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 28bfabb399..dfe0bebabc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 
*/ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -21,8 +21,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids { * &struct iwl_lari_config_change_cmd_v2, * &struct iwl_lari_config_change_cmd_v3, * &struct iwl_lari_config_change_cmd_v4, - * &struct iwl_lari_config_change_cmd_v5 or - * &struct iwl_lari_config_change_cmd_v6 + * &struct iwl_lari_config_change_cmd_v5, + * &struct iwl_lari_config_change_cmd_v6 or + * &struct iwl_lari_config_change_cmd_v7 */ LARI_CONFIG_CHANGE = 0x1, @@ -43,6 +44,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids { */ SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, + /** + * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd + */ + UATS_TABLE_CMD = 0x5, + /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy */ @@ -480,18 +486,20 @@ union iwl_tas_config_cmd { struct iwl_tas_config_cmd_v4 v4; }; /** - * enum iwl_lari_configs - bit masks for the various LARI config operations + * enum iwl_lari_config_masks - bit masks for the various LARI config operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in * Indonesia + * @LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK: enable 2022 china regulatory */ enum iwl_lari_config_masks { LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0), LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1), LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2), LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), + LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK = BIT(7), }; #define IWL_11AX_UKRAINE_MASK 3 @@ -600,6 +608,45 @@ struct iwl_lari_config_change_cmd_v6 { __le32 force_disable_channels_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */ +/** + * struct iwl_lari_config_change_cmd_v7 - change LARI configuration + * This structure is used also for lari cmd version 8. + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * @chan_state_active_bitmap: Bitmap to enable different bands per country + * or region. + * Each bit represents a country or region, and a band to activate + * according to the BIOS definitions. + * For LARI cmd version 7 - bits 0:3 are supported. + * For LARI cmd version 8 - bits 0:4 are supported. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. + * Each bit represents a set of channels in a specific band that should be + * disabled + * @edt_bitmap: Bitmap of energy detection threshold table. + * Disable/enable the EDT optimization method for different band. 
+ */ +struct iwl_lari_config_change_cmd_v7 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; +} __packed; +/* LARI_CHANGE_CONF_CMD_S_VER_7 */ +/* LARI_CHANGE_CONF_CMD_S_VER_8 */ + +/* Activate UNII-1 (5.2GHz) for World Wide */ +#define ACTIVATE_5G2_IN_WW_MASK BIT(4) + /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status @@ -608,4 +655,17 @@ struct iwl_pnvm_init_complete_ntfy { __le32 status; } __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */ +#define UATS_TABLE_ROW_SIZE 26 +#define UATS_TABLE_COL_SIZE 13 + +/** + * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD + * @offset_map: mapping a mcc to UHB AP type support (UATS) allowed + * @reserved: reserved + */ +struct iwl_uats_table_cmd { + u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; + __le16 reserved; +} __packed; /* UATS_TABLE_CMD_S_VER_1 */ + #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 898bf351f6..2d2b9c8c36 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,7 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -18,7 +18,9 @@ enum iwl_prot_offload_subcmd_ids { WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC, /** - * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif + * @WOWLAN_INFO_NOTIFICATION: Notification in + * &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2, + * or iwl_wowlan_info_notif */ WOWLAN_INFO_NOTIFICATION = 0xFD, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 8fe42cff11..306ed88de4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -25,8 +25,8 @@ * For legacy set bit means upper channel, otherwise lower. * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
- * center_freq * For EHT - bit-3 is used for extended distance + * center_freq * | * 40Mhz |____|____| * 80Mhz |____|____|____|____| diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 85d89f559f..040d83fa54 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -144,6 +144,8 @@ struct iwl_powertable_cmd { * receiver and transmitter. '0' - does not allow. * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: * Device Retention indication, '1' indicate retention is enabled. + * @DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK: + * Prevent power save until entering d3 is completed. * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: * 32Khz external slow clock valid indication, '1' indicate cloack is * valid. @@ -151,6 +153,7 @@ struct iwl_powertable_cmd { enum iwl_device_power_flags { DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), + DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK = BIT(7), DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), }; @@ -162,7 +165,7 @@ enum iwl_device_power_flags { * @reserved: reserved (padding) */ struct iwl_device_power_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ + /* PM_POWER_TABLE_CMD_API_S_VER_7 */ __le16 flags; __le16 reserved; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h index 1a84a4081e..34d6640234 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2021, 2023 Intel Corporation */ #ifndef __iwl_fw_api_rfi_h__ #define __iwl_fw_api_rfi_h__ @@ -25,8 +25,9 @@ struct iwl_rfi_lut_entry { /** * struct iwl_rfi_config_cmd - RFI configuration table * - * @entry: a table can have 24 frequency/channel mappings + * @table: a table can have 24 frequency/channel mappings * @oem: specifies if this is the default table or set by OEM + * @reserved: (reserved/padding) */ struct iwl_rfi_config_cmd { struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE]; @@ -35,7 +36,7 @@ struct iwl_rfi_config_cmd { } __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */ /** - * iwl_rfi_freq_table_status - status of the frequency table query + * enum iwl_rfi_freq_table_status - status of the frequency table query * @RFI_FREQ_TABLE_OK: can be used * @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later * @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 25e2e23dce..e71f29d0c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -371,7 +371,7 @@ enum iwl_rx_phy_eht_data1 { 
IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 0x0000fe00, }; -/* goes into Metadata DW 7 */ +/* goes into Metadata DW 7 (Qu) or 8 (So or higher) */ enum iwl_rx_phy_he_data2 { /* info type: HE MU-EXT */ /* the a1/a2/... is what the PHY/firmware calls the values */ @@ -387,7 +387,7 @@ enum iwl_rx_phy_he_data2 { IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000, }; -/* goes into Metadata DW 8 */ +/* goes into Metadata DW 8 (Qu) or 7 (So or higher) */ enum iwl_rx_phy_he_data3 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ @@ -408,10 +408,9 @@ enum iwl_rx_phy_he_he_data4 { IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; -/* goes into Metadata DW 7 */ +/* goes into Metadata DW 8 (Qu has no EHT) */ enum iwl_rx_phy_eht_data2 { /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff, IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00, IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 0x07fc0000, @@ -420,11 +419,10 @@ enum iwl_rx_phy_eht_data2 { IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff, }; -/* goes into Metadata DW 8 */ +/* goes into Metadata DW 7 (Qu has no EHT) */ enum iwl_rx_phy_eht_data3 { + /* note: low 8 bits cannot be used */ /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */ - IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x000001ff, IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 0x0003fe00, IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 0x07fc0000, }; @@ -432,10 +430,10 @@ enum iwl_rx_phy_eht_data3 { /* goes into Metadata DW 4 */ enum iwl_rx_phy_eht_data4 { /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 0x000001ff, IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 0x0003fe00, IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x000c0000, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_B2 = 0x1ff00000, }; /* goes into Metadata DW 16 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 898e62326e..2271b19213 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_stats_h__ #define __iwl_fw_api_stats_h__ #include "mac.h" +#include "mac-cfg.h" struct mvm_statistics_dbg { __le32 burst_check; @@ -411,6 +412,49 @@ struct iwl_statistics_cmd { #define MAX_BCAST_FILTER_NUM 8 +/** + * enum iwl_statistics_notify_type_id - type_id used in system statistics + * command + * @IWL_STATS_NTFY_TYPE_ID_OPER: request legacy statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART1: request operational part1 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART2: request operational part2 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART3: request operational part3 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART4: request operational part4 statistics + */ +enum iwl_statistics_notify_type_id { + IWL_STATS_NTFY_TYPE_ID_OPER = BIT(0), + IWL_STATS_NTFY_TYPE_ID_OPER_PART1 = BIT(1), + IWL_STATS_NTFY_TYPE_ID_OPER_PART2 = BIT(2), + IWL_STATS_NTFY_TYPE_ID_OPER_PART3 = BIT(3), + IWL_STATS_NTFY_TYPE_ID_OPER_PART4 = BIT(4), +}; + +/** + * enum iwl_statistics_cfg_flags - cfg_mask used in system 
statistics command + * @IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK: 0 for enable, 1 for disable + * @IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK: 0 for periodic, 1 for on-demand + * @IWL_STATS_CFG_FLG_RESET_MSK: 0 for reset statistics after + * sending the notification, 1 for do not reset statistics after sending + * the notification + */ +enum iwl_statistics_cfg_flags { + IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK = BIT(0), + IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK = BIT(1), + IWL_STATS_CFG_FLG_RESET_MSK = BIT(2), +}; + +/** + * struct iwl_system_statistics_cmd - system statistics command + * @cfg_mask: configuration mask, &enum iwl_statistics_cfg_flags + * @config_time_sec: time in sec for periodic notification + * @type_id_mask: type_id masks, &enum iwl_statistics_notify_type_id + */ +struct iwl_system_statistics_cmd { + __le32 cfg_mask; + __le32 config_time_sec; + __le32 type_id_mask; +} __packed; /* STATISTICS_FW_CMD_API_S_VER_1 */ + /** * enum iwl_fw_statistics_type * @@ -447,7 +491,49 @@ struct iwl_statistics_ntfy_hdr { }; /* STATISTICS_NTFY_HDR_API_S_VER_1 */ /** - * struct iwl_statistics_ntfy_per_mac + * struct iwl_stats_ntfy_per_link + * + * @beacon_filter_average_energy: Average energy [-dBm] of the 2 + * antennas. + * @air_time: air time + * @beacon_counter: all beacons (both filtered and not filtered) + * @beacon_average_energy: Average energy [-dBm] of all beacons + * (both filtered and not filtered) + * @beacon_rssi_a: beacon RSSI on antenna A + * @beacon_rssi_b: beacon RSSI on antenna B + * @rx_bytes: RX byte count + */ +struct iwl_stats_ntfy_per_link { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +} __packed; /* STATISTICS_NTFY_PER_LINK_API_S_VER_1 */ + +/** + * struct iwl_stats_ntfy_part1_per_link + * + * @rx_time: rx time + * @tx_time: tx time + * @rx_action: action frames handled by FW + * @tx_action: action frames generated and transmitted by FW + * @cca_defers: cca defer count + * @beacon_filtered: filtered out beacons + */ +struct iwl_stats_ntfy_part1_per_link { + __le64 rx_time; + __le64 tx_time; + __le32 rx_action; + __le32 tx_action; + __le32 cca_defers; + __le32 beacon_filtered; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_PER_LINK_API_S_VER_1 */ + +/** + * struct iwl_stats_ntfy_per_mac * * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. @@ -459,7 +545,7 @@ struct iwl_statistics_ntfy_hdr { * @beacon_rssi_b: beacon RSSI on antenna B * @rx_bytes: RX byte count */ -struct iwl_statistics_ntfy_per_mac { +struct iwl_stats_ntfy_per_mac { __le32 beacon_filter_average_energy; __le32 air_time; __le32 beacon_counter; @@ -470,7 +556,7 @@ struct iwl_statistics_ntfy_per_mac { } __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */ #define IWL_STATS_MAX_BW_INDEX 5 -/** struct iwl_statistics_ntfy_per_phy +/** struct iwl_stats_ntfy_per_phy * @channel_load: channel load * @channel_load_by_us: device contribution to MCLM * @channel_load_not_by_us: other devices' contribution to MCLM @@ -485,7 +571,7 @@ struct iwl_statistics_ntfy_per_mac { * per channel BW. 
note BACK counted as 1 * @last_tx_ch_width_indx: last txed frame channel width index */ -struct iwl_statistics_ntfy_per_phy { +struct iwl_stats_ntfy_per_phy { __le32 channel_load; __le32 channel_load_by_us; __le32 channel_load_not_by_us; @@ -499,23 +585,62 @@ struct iwl_statistics_ntfy_per_phy { } __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */ /** - * struct iwl_statistics_ntfy_per_sta + * struct iwl_stats_ntfy_per_sta * * @average_energy: in fact it is minus the energy.. */ -struct iwl_statistics_ntfy_per_sta { +struct iwl_stats_ntfy_per_sta { __le32 average_energy; } __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ -#define IWL_STATS_MAX_PHY_OPERTINAL 3 +#define IWL_STATS_MAX_PHY_OPERATIONAL 3 +#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1) + +/** + * struct iwl_system_statistics_notif_oper + * + * @time_stamp: time when the notification is sent from firmware + * @per_link: per link statistics, &struct iwl_stats_ntfy_per_link + * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy + * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta + */ +struct iwl_system_statistics_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS]; + struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ + +/** + * struct iwl_system_statistics_part1_notif_oper + * + * @time_stamp: time when the notification is sent from firmware + * @per_link: per link statistics &struct iwl_stats_ntfy_part1_per_link + * @per_phy_crc_error_stats: per phy crc error statistics + */ +struct iwl_system_statistics_part1_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_part1_per_link per_link[IWL_STATS_MAX_FW_LINKS]; + __le32 per_phy_crc_error_stats[IWL_STATS_MAX_PHY_OPERATIONAL]; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */ + +/** + * struct iwl_system_statistics_end_notif + * + * @time_stamp: time when the notification is sent from firmware + */ +struct iwl_system_statistics_end_notif { + __le32 time_stamp; +} __packed; /* STATISTICS_FW_NTFY_END_API_S_VER_1 */ + /** * struct iwl_statistics_operational_ntfy * * @hdr: general statistics header * @flags: bitmap of possible notification structures - * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac - * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy - * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta + * @per_mac: per mac statistics, &struct iwl_stats_ntfy_per_mac + * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy + * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta * @rx_time: rx time * @tx_time: usec the radio is transmitting. * @on_time_rf: The total time in usec the RF is awake. 
@@ -524,9 +649,9 @@ struct iwl_statistics_ntfy_per_sta { struct iwl_statistics_operational_ntfy { struct iwl_statistics_ntfy_hdr hdr; __le32 flags; - struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX]; - struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL]; - struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX]; + struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; __le64 rx_time; __le64 tx_time; __le64 on_time_rf; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 7cc706731d..2e15be71c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020, 2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -335,6 +335,63 @@ struct iwl_hs20_roc_res { __le32 status; } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ +/* + * Activity types for the ROC command + * @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity + * @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity + * @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity + */ +enum iwl_roc_activity { + ROC_ACTIVITY_HOTSPOT, + ROC_ACTIVITY_P2P_DISC, + ROC_ACTIVITY_P2P_TXRX, + ROC_NUM_ACTIVITIES +}; /* ROC_ACTIVITY_API_E_VER_1 */ + +/* + * ROC command + * + * Command requests the firmware to remain on a channel for a certain duration. + * + * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE ) + * + * @action: action to perform, see &enum iwl_ctxt_action + * @activity: type of activity, see &enum iwl_roc_activity + * @sta_id: station id, resumed during "Remain On Channel" activity. + * @channel_info: &struct iwl_fw_channel_info + * @node_addr: node MAC address for Rx filtering + * @reserved: align to a dword + * @max_delay: max delay the ROC can start in TU + * @duration: remain on channel duration in TU + */ +struct iwl_roc_req { + __le32 action; + __le32 activity; + __le32 sta_id; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 max_delay; + __le32 duration; +} __packed; /* ROC_CMD_API_S_VER_3 */ + +/* + * ROC notification + * + * Notification when ROC startes and when ROC ended. + * + * ( MAC_CONF_GROUP 0x3, ROC_NOTIF 0xf8 ) + * + * @status: true if ROC succeeded to start + * @start_end: true if ROC started, false if ROC ended + * @activity: notification to which activity - &enum iwl_roc_activity + */ +struct iwl_roc_notif { + __le32 success; + __le32 started; + __le32 activity; +} __packed; /* ROC_NOTIF_API_S_VER_1 */ + /** * enum iwl_mvm_session_prot_conf_id - session protection's configurations * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association. 
@@ -375,8 +432,8 @@ enum iwl_mvm_session_prot_conf_id { /** * struct iwl_mvm_session_prot_cmd - configure a session protection - * @id_and_color: the id and color of the mac for which this session protection - * is sent + * @id_and_color: the id and color of the link (or mac, for command version 1) + * for which this session protection is sent * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE, * see &enum iwl_ctxt_action * @conf_id: see &enum iwl_mvm_session_prot_conf_id @@ -397,11 +454,15 @@ struct iwl_mvm_session_prot_cmd { __le32 duration_tu; __le32 repetition_count; __le32 interval; -} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */ +} __packed; +/* SESSION_PROTECTION_CMD_API_S_VER_1 and + * SESSION_PROTECTION_CMD_API_S_VER_2 + */ /** * struct iwl_mvm_session_prot_notif - session protection started / ended - * @mac_id: the mac id for which the session protection started / ended + * @mac_link_id: the mac id (or link id, for notif ver > 2) for which the + * session protection started / ended * @status: 1 means success, 0 means failure * @start: 1 means the session protection started, 0 means it ended * @conf_id: see &enum iwl_mvm_session_prot_conf_id @@ -410,10 +471,13 @@ struct iwl_mvm_session_prot_cmd { * and end even the firmware could not schedule it. */ struct iwl_mvm_session_prot_notif { - __le32 mac_id; + __le32 mac_link_id; __le32 status; __le32 start; __le32 conf_id; -} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */ +} __packed; +/* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 and + * SESSION_PROTECTION_NOTIFICATION_API_S_VER_3 + */ #endif /* __iwl_fw_api_time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index e018946310..9c69d36743 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2019-2021 Intel Corporation + * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,7 +76,7 @@ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; -#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4) +#define IWL_DEFAULT_QUEUE_SIZE_EHT (512 * 4) #define IWL_DEFAULT_QUEUE_SIZE_HE 1024 #define IWL_DEFAULT_QUEUE_SIZE 256 #define IWL_MGMT_QUEUE_SIZE 16 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 3ab6a68f1e..3975a53a9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1021,22 +1021,18 @@ struct iwl_dump_ini_region_data { struct iwl_fwrt_dump_data *dump_data; }; -static int -iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, u32 range_len, int idx) +static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt, + void *range_ptr, u32 addr, + __le32 size) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; - u32 addr = le32_to_cpu(reg->addrs[idx]) + - le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); - range->range_data_size = reg->dev_addr.size; - for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + range->range_data_size = size; 
+ for (i = 0; i < le32_to_cpu(size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); if (iwl_trans_is_hw_error_value(prph_val)) return -EBUSY; @@ -1047,38 +1043,61 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, } static int -iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, +iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 addr = le32_to_cpu(reg->addrs[idx]) + + le32_to_cpu(reg->dev_addr.offset); + + return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr, + reg->dev_addr.size); +} + +static int +iwl_dump_ini_prph_mac_block_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 addr = le32_to_cpu(reg->dev_addr_range.offset) + + le32_to_cpu(pairs[idx].addr); + + return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr, + pairs[idx].size); +} + +static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt, + void *range_ptr, u32 addr, + __le32 size, __le32 offset) +{ struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; u32 indirect_rd_addr = WMAL_MRSPF_1; u32 prph_val; - u32 addr = le32_to_cpu(reg->addrs[idx]); u32 dphy_state; u32 dphy_addr; int i; range->internal_base_addr = cpu_to_le32(addr); - range->range_data_size = reg->dev_addr.size; + range->range_data_size = size; if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; - indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); - indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); + indirect_wr_addr += le32_to_cpu(offset); + indirect_rd_addr += le32_to_cpu(offset); if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; - dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW : - WFPM_LMAC1_PS_CTL_RW; + dphy_addr = (offset) ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); - for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + for (i = 0; i < le32_to_cpu(size); i += 4) { if (dphy_state == HBUS_TIMEOUT || (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != WFPM_PHYRF_STATE_ON) { @@ -1097,6 +1116,33 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 addr = le32_to_cpu(reg->addrs[idx]); + + return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr, + reg->dev_addr.size, + reg->dev_addr.offset); +} + +static int +iwl_dump_ini_prph_phy_block_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 addr = le32_to_cpu(pairs[idx].addr); + + return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr, + pairs[idx].size, + reg->dev_addr_range.offset); +} + static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) @@ -1370,6 +1416,53 @@ out: return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_prph_snps_dphyip_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + __le32 offset = reg->dev_addr.offset; + u32 indirect_rd_wr_addr = DPHYIP_INDIRECT; + u32 addr = le32_to_cpu(reg->addrs[idx]); + u32 dphy_state, dphy_addr, prph_val; + int i; + + range->internal_base_addr = cpu_to_le32(addr); + range->range_data_size = reg->dev_addr.size; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return -EBUSY; + + indirect_rd_wr_addr += le32_to_cpu(offset); + + dphy_addr = offset ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; + dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); + + for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + if (dphy_state == HBUS_TIMEOUT || + (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != + WFPM_PHYRF_STATE_ON) { + *val++ = cpu_to_le32(WFPM_DPHY_OFF); + continue; + } + + iwl_write_prph_no_grab(fwrt->trans, indirect_rd_wr_addr, + addr + i); + /* wait a bit for value to be ready in register */ + udelay(1); + prph_val = iwl_read_prph_no_grab(fwrt->trans, + indirect_rd_wr_addr); + *val++ = cpu_to_le32((prph_val & DPHYIP_INDIRECT_RD_MSK) >> + DPHYIP_INDIRECT_RD_SHIFT); + } + + iwl_trans_release_nic_access(fwrt->trans); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + struct iwl_ini_rxf_data { u32 fifo_num; u32 size; @@ -1781,6 +1874,16 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); } +static u32 +iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + size_t size = sizeof(struct iwl_fw_ini_addr_size); + + return iwl_tlv_array_len_with_size(reg_data->reg_tlv, reg, size); +} + static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1866,6 +1969,25 @@ static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, (size + sizeof(struct iwl_fw_ini_error_dump_range)); } +static u32 +iwl_dump_ini_mem_block_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 ranges = iwl_dump_ini_mem_block_ranges(fwrt, reg_data); + u32 size = sizeof(struct iwl_fw_ini_error_dump); + int range; + + if (!ranges) + return 0; + + for (range = 0; range < ranges; range++) + size += le32_to_cpu(pairs[range].size); + + return size + ranges * sizeof(struct iwl_fw_ini_error_dump_range); +} + static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) @@ -2413,6 +2535,18 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_phy_iter, }, + [IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE] = { + .get_num_of_ranges = iwl_dump_ini_mem_block_ranges, + .get_size = iwl_dump_ini_mem_block_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_mac_block_iter, + }, + [IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE] = { + .get_num_of_ranges = iwl_dump_ini_mem_block_ranges, + .get_size = iwl_dump_ini_mem_block_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_phy_block_iter, + }, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, @@ -2450,6 +2584,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header, .fill_range = iwl_dump_ini_dbgi_sram_iter, }, + [IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_snps_dphyip_iter, + }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, @@ -2492,7 +2632,9 @@ static 
u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; - if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && + if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY || + reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE || + reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) && tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { IWL_WARN(fwrt, "WRT: trying to collect phy prph at time point: %d, skipping\n", @@ -3228,3 +3370,28 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, #endif } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); + +void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_dbg_config_cmd cmd = { + .type = cpu_to_le32(DEBUG_TOKEN_CONFIG_TYPE), + .conf = cpu_to_le32(IWL_FW_DBG_CONFIG_TOKEN), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LONG_GROUP, LDBG_CONFIG_CMD), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u32 preset = u32_get_bits(fwrt->trans->dbg.domains_bitmap, + GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1)); + + /* supported starting from 9000 devices */ + if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + return; + + if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1)) + return; + + iwl_trans_send_cmd(fwrt->trans, &hcmd); +} +IWL_EXPORT_SYMBOL(iwl_fw_disable_dbg_asserts); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 4227fbd2b9..66b233250c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -329,6 +329,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); +void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt); #define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \ IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 3cdbc6ac7a..751a125a15 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -141,7 +141,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, event_cfg.enabled_severities = cpu_to_le32(enabled_severities); - ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + if (fwrt->ops && fwrt->ops->send_hcmd) + ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); + else + ret = -EPERM; + IWL_INFO(fwrt, "sent host event cfg with enabled_severities: %u, ret: %d\n", enabled_severities, ret); @@ -342,6 +346,12 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v) " %d: %d\n", IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT, has_capa); + has_capa = fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT) ? 
1 : 0; + seq_printf(seq, + " %d: %d\n", + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT, + has_capa); seq_puts(seq, "fw_api_ver:\n"); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index b36e9613a5..03f6e52014 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2008-2014, 2018-2021 Intel Corporation + * Copyright (C) 2008-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -281,12 +281,16 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59, - -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ -#define NUM_IWL_UCODE_TLV_API 128 -#else NUM_IWL_UCODE_TLV_API +/* + * This construction make both sparse (which cannot increment the previous + * member due to its bitwise type) and kernel-doc (which doesn't understand + * the ifdef/else properly) work. + */ +#ifdef __CHECKER__ +#define __CHECKER_NUM_IWL_UCODE_TLV_API 128 + = (__force iwl_ucode_tlv_api_t)__CHECKER_NUM_IWL_UCODE_TLV_API, +#define NUM_IWL_UCODE_TLV_API __CHECKER_NUM_IWL_UCODE_TLV_API #endif }; @@ -468,12 +472,18 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = (__force iwl_ucode_tlv_capa_t)113, IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114, IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117, -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ -#define NUM_IWL_UCODE_TLV_CAPA 128 -#else NUM_IWL_UCODE_TLV_CAPA +/* + * This construction make both sparse (which cannot increment the previous + * member due to its bitwise type) and kernel-doc (which doesn't understand + * the ifdef/else properly) work. 
+ */ +#ifdef __CHECKER__ +#define __CHECKER_NUM_IWL_UCODE_TLV_CAPA 128 + = (__force iwl_ucode_tlv_capa_t)__CHECKER_NUM_IWL_UCODE_TLV_CAPA, +#define NUM_IWL_UCODE_TLV_CAPA __CHECKER_NUM_IWL_UCODE_TLV_CAPA #endif }; @@ -965,4 +975,6 @@ static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \ sizeof(_struct_ptr->_memb[0])) +#define iwl_tlv_array_len_with_size(_tlv_ptr, _struct_ptr, _size) \ + _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), _size) #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 8d0d58d618..96bda80632 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -198,7 +198,7 @@ struct iwl_dump_exclude { struct iwl_fw { u32 ucode_ver; - char fw_version[64]; + char fw_version[128]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h index 49e8ba11b6..0e49794911 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2023 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_notif_wait_h__ @@ -25,6 +25,7 @@ struct iwl_notif_wait_data { * returns true, the wait is over, if it returns false then * the waiter stays blocked. If no function is given, any * of the listed commands will unblock the waiter. + * @fn_data: pointer to pass to the @fn's data argument * @cmds: command IDs * @n_cmds: number of command IDs * @triggered: waiter should be woken up diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c index b09e68dbf5..8f99e50162 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -208,7 +208,6 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", iwl_rs_pretty_ant(ant), - index == IWL_RATE_INVALID ? 
"BAD" : iwl_rate_mcs(index)->mbps); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 7025869455..357727774d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -98,6 +98,8 @@ struct iwl_txf_iter_data { * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data + * @uats_enabled: VLP or AFC AP is enabled + * @uats_table: AP type table */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -171,6 +173,8 @@ struct iwl_fw_runtime { struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; u8 reduced_power_flags; + bool uats_enabled; + struct iwl_uats_table_cmd uats_table; #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 9877988db0..2964c5fb11 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -388,4 +388,54 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, kfree(data); } IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); + +static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data, + struct iwl_fw_runtime *fwrt) +{ + if (uats_data->revision != 1) + return -EINVAL; + + memcpy(fwrt->uats_table.offset_map, uats_data->offset_map, + sizeof(fwrt->uats_table.offset_map)); + return 0; +} + +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_wlan_uats_data *data; + unsigned long package_size; + int ret; + + data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "UATS UEFI variable not found 0x%lx\n", + PTR_ERR(data)); + return -EINVAL; + } + + if (package_size < sizeof(*data)) { + IWL_DEBUG_FW(trans, + "Invalid UATS table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return -EINVAL; + } + + IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n", + package_size); + + ret = iwl_uefi_uats_parse(data, fwrt); + if (ret < 0) { + IWL_DEBUG_FW(trans, "Cannot read UATS table. 
rev is invalid\n"); + kfree(data); + return ret; + } + + kfree(data); + return 0; +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table); #endif /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 1369cc4855..bf61a8df12 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -9,8 +9,10 @@ #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" #define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" #define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP" +#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS" #define IWL_SGOM_MAP_SIZE 339 +#define IWL_UATS_MAP_SIZE 339 struct pnvm_sku_package { u8 rev; @@ -25,6 +27,11 @@ struct uefi_cnv_wlan_sgom_data { u8 offset_map[IWL_SGOM_MAP_SIZE - 1]; } __packed; +struct uefi_cnv_wlan_uats_data { + u8 revision; + u8 offset_map[IWL_UATS_MAP_SIZE - 1]; +} __packed; + struct uefi_cnv_common_step_data { u8 revision; u8 step_mode; @@ -82,10 +89,20 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, #if defined(CONFIG_EFI) && defined(CONFIG_ACPI) void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt); #else static inline void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { } + +static inline +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + return 0; +} + #endif #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f45f645ca6..02ded22295 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -247,7 +247,6 @@ enum iwl_cfg_trans_ltr_delay { * RFID can be read before deciding the remaining parameters to use. * * @base_params: pointer to basic parameters - * @csr: csr flags and addresses that are different across devices * @device_family: the device family * @umac_prph_offset: offset to add to UMAC periphery address * @xtal_latency: power up latency to get the xtal stabilized @@ -316,7 +315,6 @@ struct iwl_fw_mon_regs { * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version - * @lib: pointer to the lib ops * @ht_params: point to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity @@ -341,15 +339,12 @@ struct iwl_fw_mon_regs { * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data - * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @uhb_supported: ultra high band channels supported * @min_ba_txq_size: minimum number of slots required in a TX queue which * based on hardware support (HE - 256, EHT - 1K). * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) - * @mac_addr_csr_base: CSR base register for MAC address access, if not set - * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. 
* API differences in uCode shouldn't be handled here but through TLVs diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 96bf353469..1379dc2d23 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2022 Intel Corporation + * Copyright (C) 2018, 2020-2023 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -44,7 +44,7 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for * multicomm. * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW - * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for * completion descriptor, 1 for responses (legacy) * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. @@ -187,11 +187,15 @@ struct iwl_prph_scratch_ctrl_cfg { * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM + * @fseq_override: FSEQ override parameters + * @step_analog_params: STEP analog calibration values * @reserved: reserved */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; - __le32 reserved[10]; + __le32 fseq_override; + __le32 step_analog_params; + __le32 reserved[8]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 587368a0ad..a4df67ff21 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -313,6 +313,7 @@ enum { SILICON_C_STEP, SILICON_D_STEP, SILICON_E_STEP, + SILICON_TC_STEP = 0xe, SILICON_Z_STEP = 0xf, }; @@ -618,6 +619,7 @@ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(1), MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), + MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR = BIT(3), MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 1df60d5190..9160d81a87 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -509,6 +509,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) if (res) return; + trans->dbg.yoyo_bin_loaded = true; + iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); release_firmware(fw); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index 1455b57835..01fb7b900a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -3,17 +3,19 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
* Copyright(C) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018, 2023 Intel Corporation *****************************************************************************/ #ifndef __IWLWIFI_DEVICE_TRACE #include #include #include +#include #include "iwl-trans.h" #if !defined(__IWLWIFI_DEVICE_TRACE) static inline bool iwl_trace_data(struct sk_buff *skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; int offs = 24; /* start with normal header length */ @@ -21,6 +23,10 @@ static inline bool iwl_trace_data(struct sk_buff *skb) if (!ieee80211_is_data(fc)) return false; + /* If upper layers wanted TX status it's an important frame */ + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) + return false; + /* Try to determine if the frame is EAPOL. This might have false * positives (if there's no RFC 1042 header and we compare to some * payload instead) but since we're only doing tracing that's not diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index fb5e254757..abf8001bda 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); kfree(drv->trans->dbg.pc_data); + drv->trans->dbg.pc_data = NULL; for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -162,6 +163,8 @@ static inline char iwl_drv_get_step(int step) { if (step == SILICON_Z_STEP) return 'z'; + if (step == SILICON_TC_STEP) + return 'a'; return 'a' + step; } @@ -178,6 +181,8 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) mac_step = iwl_drv_get_step(trans->hw_rev_step); + rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_HR1: case IWL_CFG_RF_TYPE_HR2: @@ -196,7 +201,13 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) rf = "fm"; break; case IWL_CFG_RF_TYPE_WH: - rf = "wh"; + if (SILICON_Z_STEP == + CSR_HW_RFID_STEP(trans->hw_rf_id)) { + rf = "whtc"; + rf_step = 'a'; + } else { + rf = "wh"; + } break; default: return "unknown-rf"; @@ -204,8 +215,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? 
"4" : ""; - rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); - scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-%s-%c0-%s%s-%c0", trans->cfg->fw_name_mac, mac_step, @@ -1303,10 +1312,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_CURRENT_PC: if (tlv_len < sizeof(struct iwl_pc_data)) goto invalid_tlv_len; - drv->trans->dbg.num_pc = - tlv_len / sizeof(struct iwl_pc_data); drv->trans->dbg.pc_data = kmemdup(tlv_data, tlv_len, GFP_KERNEL); + if (!drv->trans->dbg.pc_data) + return -ENOMEM; + drv->trans->dbg.num_pc = + tlv_len / sizeof(struct iwl_pc_data); break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); @@ -1415,6 +1426,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) struct iwl_op_mode *op_mode = NULL; int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + /* also protects start/stop from racing against each other */ + lockdep_assert_held(&iwlwifi_opmode_table_mtx); + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1429,6 +1443,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) if (op_mode) return op_mode; + if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status)) + break; + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1442,6 +1459,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) static void _iwl_op_mode_stop(struct iwl_drv *drv) { + /* also protects start/stop from racing against each other */ + lockdep_assert_held(&iwlwifi_opmode_table_mtx); + /* op_mode can be NULL if its start failed */ if (drv->op_mode) { iwl_op_mode_stop(drv->op_mode); @@ -1725,11 +1745,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } mutex_unlock(&iwlwifi_opmode_table_mtx); - /* - * Complete the firmware request last so that - * a driver unbind (stop) doesn't run while we - * are doing the start() above. - */ complete(&drv->request_firmware_complete); /* @@ -1834,11 +1849,12 @@ void iwl_drv_stop(struct iwl_drv *drv) { wait_for_completion(&drv->request_firmware_complete); + mutex_lock(&iwlwifi_opmode_table_mtx); + _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); - mutex_lock(&iwlwifi_opmode_table_mtx); /* * List is empty (this item wasn't added) * when firmware loading failed -- in that diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 6c19989e4a..3d1a27ba35 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -56,7 +56,7 @@ struct iwl_cfg; /** * iwl_drv_start - start the drv * - * @trans_ops: the ops of the transport + * @trans: the transport * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. 
For example, the bus specific probe diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index d7a7835b93..5aab64c63a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #include @@ -721,6 +721,9 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; ht_info->mcs.rx_mask[0] = 0xFF; + ht_info->mcs.rx_mask[1] = 0x00; + ht_info->mcs.rx_mask[2] = 0x00; + if (rx_chains >= 2) ht_info->mcs.rx_mask[1] = 0xFF; if (rx_chains >= 3) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 0e8ca761d2..34a178a2eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018, 2020-2023 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #ifndef __iwl_eeprom_parse_h__ @@ -61,7 +61,7 @@ struct iwl_nvm_data { /** * iwl_parse_eeprom_data - parse EEPROM data and return values * - * @dev: device pointer we're parsing for, for debug only + * @trans: ransport we're parsing for, for debug only * @cfg: device configuration for parsing and overrides * @eeprom: the EEPROM data * @eeprom_size: length of the EEPROM data diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 41ab5a6e2d..e0400ba2ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -681,12 +681,13 @@ struct iwl_tfh_tb { /** * struct iwl_tfd - Transmit Frame Descriptor (TFD) - * @ __reserved1[3] reserved - * @ num_tbs 0-4 number of active tbs - * 5 reserved - * 6-7 padding (not used) - * @ tbs[20] transmit frame buffer descriptors - * @ __pad padding + * @__reserved1: reserved + * @num_tbs: + * 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @tbs: transmit frame buffer descriptors + * @__pad: padding */ struct iwl_tfd { u8 __reserved1[3]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index e3120ab893..678c4a0718 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -668,10 +668,10 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | - IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 | + IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | @@ -792,7 +792,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = - IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | 
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, @@ -962,6 +961,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, } } } else { + struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp = + &iftype_data->he_cap.he_mcs_nss_supp; + if (iftype_data->eht_cap.has_eht) { struct ieee80211_eht_mcs_nss_supp *mcs_nss = &iftype_data->eht_cap.eht_mcs_nss_supp; @@ -980,6 +982,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_1; } + + he_mcs_nss_supp->rx_mcs_80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->rx_mcs_160 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_160 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->rx_mcs_80p80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_80p80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) @@ -1003,8 +1018,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && iftype_data->eht_cap.has_eht) { iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= - ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | - IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2); iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &= ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | @@ -1053,10 +1067,6 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, struct ieee80211_sband_iftype_data *iftype_data; int i; - /* should only initialize once */ - if (WARN_ON(sband->iftype_data)) - return; - BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa)); BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa)); BUILD_BUG_ON(sizeof(data->iftd.uhb) != sizeof(iwl_he_eht_capa)); @@ -1078,8 +1088,8 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa)); - sband->iftype_data = iftype_data; - sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa); + _ieee80211_set_sband_iftype_data(sband, iftype_data, + ARRAY_SIZE(iwl_he_eht_capa)); for (i = 0; i < sband->n_iftype_data; i++) iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i], @@ -1088,6 +1098,37 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); } +void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data, + u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) +{ + struct ieee80211_supported_band *sband; + + sband = &data->bands[NL80211_BAND_2GHZ]; + iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ, + tx_chains, rx_chains); + + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); + + sband = &data->bands[NL80211_BAND_5GHZ]; + iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ, + tx_chains, rx_chains); + if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) + iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, + tx_chains, rx_chains); + + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); + + sband = 
&data->bands[NL80211_BAND_6GHZ]; + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); +} +IWL_EXPORT_SYMBOL(iwl_reinit_cab); + static void iwl_init_sbands(struct iwl_trans *trans, struct iwl_nvm_data *data, const void *nvm_ch_flags, u8 tx_chains, @@ -1366,7 +1407,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, - const struct iwl_fw *fw) + const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant) { struct iwl_nvm_data *data; u32 sbands_flags = 0; @@ -1393,6 +1434,10 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; + if (tx_ant) + tx_chains &= tx_ant; + if (rx_ant) + rx_chains &= rx_ant; data->sku_cap_mimo_disabled = false; data->sku_cap_band_24ghz_enable = true; @@ -1958,7 +2003,8 @@ out: IWL_EXPORT_SYMBOL(iwl_read_external_nvm); struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, - const struct iwl_fw *fw) + const struct iwl_fw *fw, + u8 set_tx_ant, u8 set_rx_ant) { struct iwl_nvm_get_info cmd = {}; struct iwl_nvm_data *nvm; @@ -1972,6 +2018,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; + u8 tx_ant; + u8 rx_ant; + /* * All the values in iwl_nvm_get_info_rsp v4 are the same as * in v3, except for the channel profile part of the @@ -2059,10 +2108,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, channel_profile = v4 ? (void *)rsp->regulatory.channel_profile : (void *)rsp_v3->regulatory.channel_profile; - iwl_init_sbands(trans, nvm, - channel_profile, - nvm->valid_tx_ant & fw->valid_tx_ant, - nvm->valid_rx_ant & fw->valid_rx_ant, + tx_ant = nvm->valid_tx_ant & fw->valid_tx_ant; + rx_ant = nvm->valid_rx_ant & fw->valid_rx_ant; + + if (set_tx_ant) + tx_ant &= set_tx_ant; + if (set_rx_ant) + rx_ant &= set_rx_ant; + + iwl_init_sbands(trans, nvm, channel_profile, tx_ant, rx_ant, sbands_flags, v4, fw); iwl_free_resp(&hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index c79f72d544..651ed25b68 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2022 Intel Corporation + * Copyright (C) 2005-2015, 2018-2023 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -21,7 +21,7 @@ enum iwl_nvm_sbands_flags { IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), }; -/** +/* * iwl_parse_nvm_data - parse NVM data and return values * * This function parses all NVM values we need and then @@ -73,21 +73,28 @@ int iwl_read_external_nvm(struct iwl_trans *trans, void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len); -/** +/* * iwl_get_nvm - retrieve NVM data from firmware * * Allocates a new iwl_nvm_data structure, fills it with * NVM data, and returns it to caller. 
*/ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, - const struct iwl_fw *fw); + const struct iwl_fw *fw, + u8 set_tx_ant, u8 set_rx_ant); -/** +/* * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data */ struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, - const struct iwl_fw *fw); + const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant); + +/* + * iwl_reinit_cab - to be called when the tx_chains or rx_chains are modified + */ +void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data, + u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 2a63968b0e..dd32c287b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -365,7 +365,6 @@ #define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF enum { - ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; @@ -383,7 +382,7 @@ enum { #define PREG_PRPH_WPROT_22000 0xA04D00 #define SB_MODIFY_CFG_FLAG 0xA03088 -#define SB_CFG_RESIDES_IN_OTP_MASK 0x10 +#define SB_CFG_RESIDES_IN_ROM 0x80 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -424,14 +423,14 @@ enum { * reserved: bits 12-18 * slave_exist: bit 19 * dash: bits 20-23 - * step: bits 24-26 - * flavor: bits 27-31 + * step: bits 24-27 + * flavor: bits 28-31 */ #define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0) #define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19) #define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20) -#define REG_CRF_ID_STEP(val) (((val) & 0x07000000) >> 24) -#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27) +#define REG_CRF_ID_STEP(val) (((val) & 0x0F000000) >> 24) +#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF0000000) >> 28) #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) @@ -452,6 +451,7 @@ enum { #define REG_CRF_ID_TYPE_FM 0x910 #define REG_CRF_ID_TYPE_FMI 0x930 #define REG_CRF_ID_TYPE_FMR 0x900 +#define REG_CRF_ID_TYPE_WHP 0xA10 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) @@ -516,4 +516,8 @@ enum { #define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC #define WFPM_LMAC2_PD_RE_READ BIT(31) +#define DPHYIP_INDIRECT 0xA2D800 +#define DPHYIP_INDIRECT_RD_MSK 0xFF000000 +#define DPHYIP_INDIRECT_RD_SHIFT 24 + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 168eda2132..05e72a2125 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -109,6 +109,7 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * @CMD_ASYNC: Return right away and don't wait for the response * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of * the response. The caller needs to call iwl_free_resp when done. + * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill. * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be * called after this command completes. Valid only with CMD_ASYNC. 
* @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to @@ -278,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 #define IWL_MAX_RX_HW_QUEUES 16 -#define IWL_9000_MAX_RX_HW_QUEUES 6 +#define IWL_9000_MAX_RX_HW_QUEUES 1 /** * enum iwl_wowlan_status - WoWLAN image/device status @@ -738,6 +739,7 @@ struct iwl_dram_data { }; /** + * struct iwl_dram_regions - DRAM regions container structure * @drams: array of several DRAM areas that contains the pnvm and power * reduction table payloads. * @n_regions: number of DRAM regions that were allocated @@ -837,6 +839,7 @@ struct iwl_pc_data { * @dump_file_name_ext_valid: dump file name extension if valid or not * @num_pc: number of program counter for cpu * @pc_data: details of the program counter + * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded */ struct iwl_trans_debug { u8 n_dest_reg; @@ -866,8 +869,7 @@ struct iwl_trans_debug { u64 unsupported_region_msk; struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID]; struct list_head debug_info_tlv_list; - struct iwl_dbg_tlv_time_point_data - time_point[IWL_FW_INI_TIME_POINT_NUM]; + struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM]; struct list_head periodic_trig_list; u32 domains_bitmap; @@ -879,6 +881,7 @@ struct iwl_trans_debug { bool dump_file_name_ext_valid; u32 num_pc; struct iwl_pc_data *pc_data; + bool yoyo_bin_loaded; }; struct iwl_dma_ptr { @@ -920,7 +923,6 @@ struct iwl_pcie_first_tb_buf { /** * struct iwl_txq - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) * @first_tb_bufs: start of command headers, including scratch buffers, for * the writeback -- this is DMA memory and an array holding one buffer @@ -1064,11 +1066,10 @@ struct iwl_trans_txqs { * starting the firmware, used for tracing * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the * start of the 802.11 header in the @rx_mpdu_cmd - * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) * @system_pm_mode: the system-wide power management mode in use. * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. - * @iwl_trans_txqs: transport tx queues data. + * @txqs: transport tx queues data. 
* @mbx_addr_0_step: step address data 0 * @mbx_addr_1_step: step address data 1 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h index 655d95d3a0..1f3c885aeb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2021 - 2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #ifndef __iwl_mei_h__ @@ -493,7 +493,7 @@ static inline void iwl_mei_set_power_limit(__le16 *power_limit) static inline int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) -{ return 0; } +{ return -EOPNOTSUPP; } static inline void iwl_mei_start_unregister(void) {} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 243eccc68c..c832068b57 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -60,6 +60,7 @@ #define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ #define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_NON_TRANSMITTING_AP 0 +#define IWL_MVM_CONN_LISTEN_INTERVAL 10 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 @@ -118,5 +119,6 @@ #define IWL_MVM_DISABLE_AP_FILS false #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ +#define IWL_MVM_AUTO_EML_ENABLE true #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index be2602d8c5..92c45571bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -818,7 +818,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) IWL_ERR(mvm, "Failed to send quota: %d\n", ret); - if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) + if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm, false)) IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); return 0; @@ -1438,6 +1438,7 @@ struct iwl_wowlan_status_data { } ptk; struct iwl_multicast_key_data igtk; + struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM]; u8 *wake_packet; }; @@ -1781,8 +1782,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status_data *status; - u32 gtk_cipher, igtk_cipher; - bool unhandled_cipher, igtk_support; + u32 gtk_cipher, igtk_cipher, bigtk_cipher; + bool unhandled_cipher, igtk_support, bigtk_support; int num_keys; }; @@ -1817,6 +1818,9 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, if (data->igtk_support && (key->keyidx == 4 || key->keyidx == 5)) { data->igtk_cipher = key->cipher; + } else if (data->bigtk_support && + (key->keyidx == 6 || key->keyidx == 7)) { + data->bigtk_cipher = key->cipher; } else { data->unhandled_cipher = true; return; @@ -1848,6 +1852,24 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, } } +static void +iwl_mvm_d3_update_igtk_bigtk(struct iwl_wowlan_status_data *status, + struct ieee80211_key_conf *key, + struct iwl_multicast_key_data *key_data) +{ + if (status->num_of_gtk_rekeys && key_data->len) { + /* 
remove rekeyed key */ + ieee80211_remove_key(key); + } else { + struct ieee80211_key_seq seq; + + iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, + &seq, + key->cipher); + ieee80211_set_key_rx_seq(key, 0, &seq); + } +} + static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1900,17 +1922,14 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_AES_CMAC: if (key->keyidx == 4 || key->keyidx == 5) { - /* remove rekeyed key */ - if (status->num_of_gtk_rekeys) { - ieee80211_remove_key(key); - } else { - struct ieee80211_key_seq seq; + iwl_mvm_d3_update_igtk_bigtk(status, key, + &status->igtk); + } + if (key->keyidx == 6 || key->keyidx == 7) { + u8 idx = key->keyidx == status->bigtk[1].id; - iwl_mvm_d3_set_igtk_bigtk_ipn(&status->igtk, - &seq, - key->cipher); - ieee80211_set_key_rx_seq(key, 0, &seq); - } + iwl_mvm_d3_update_igtk_bigtk(status, key, + &status->bigtk[idx]); } } } @@ -2052,6 +2071,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, .mvm = mvm, .status = status, }; + int i; + u32 disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; @@ -2068,6 +2089,11 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 0)) gtkdata.igtk_support = true; + if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + 0) >= 3) + gtkdata.bigtk_support = true; + /* find last GTK that we used initially, if any */ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_find_last_keys, >kdata); @@ -2098,6 +2124,13 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, &status->igtk)) return false; + for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) { + if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif, + gtkdata.bigtk_cipher, + &status->bigtk[i])) + return false; + } + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } @@ -2182,6 +2215,37 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn)); } +static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status, + const struct iwl_wowlan_igtk_status *data) +{ + int data_idx, status_idx = 0; + + BUILD_BUG_ON(ARRAY_SIZE(status->bigtk) < WOWLAN_BIGTK_KEYS_NUM); + + for (data_idx = 0; data_idx < WOWLAN_BIGTK_KEYS_NUM; data_idx++) { + if (!data[data_idx].key_len) + continue; + + status->bigtk[status_idx].len = data[data_idx].key_len; + status->bigtk[status_idx].flags = data[data_idx].key_flags; + status->bigtk[status_idx].id = + u32_get_bits(data[data_idx].key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_BIGTK_MIN_INDEX; + + BUILD_BUG_ON(sizeof(status->bigtk[status_idx].key) < + sizeof(data[data_idx].key)); + BUILD_BUG_ON(sizeof(status->bigtk[status_idx].ipn) < + sizeof(data[data_idx].ipn)); + + memcpy(status->bigtk[status_idx].key, data[data_idx].key, + sizeof(data[data_idx].key)); + memcpy(status->bigtk[status_idx].ipn, data[data_idx].ipn, + sizeof(data[data_idx].ipn)); + status_idx++; + } +} + static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, struct iwl_wowlan_info_notif *data, struct iwl_wowlan_status_data *status, @@ -2204,7 +2268,42 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); iwl_mvm_convert_gtk_v3(status, data->gtk); iwl_mvm_convert_igtk(status, &data->igtk[0]); + iwl_mvm_convert_bigtk(status, 
data->bigtk); + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) + status->qos_seq_ctr[i] = + le16_to_cpu(data->qos_seq_ctr[i]); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; +} + +static void +iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif_v2 *data, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 i; + + if (!data) { + IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + status = NULL; + return; + } + + if (len < sizeof(*data)) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); + status = NULL; + return; + } + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + iwl_mvm_convert_igtk(status, &data->igtk[0]); status->replay_ctr = le64_to_cpu(data->replay_ctr); status->pattern_number = le16_to_cpu(data->pattern_number); for (i = 0; i < IWL_MAX_TID_COUNT; i++) @@ -2876,7 +2975,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_d3_data *d3_data = data; - u32 len; + u32 len = iwl_rx_packet_payload_len(pkt); int ret; int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, @@ -2886,7 +2985,6 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): { - struct iwl_wowlan_info_notif *notif; if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) { /* We might get two notifications due to dual bss */ @@ -2896,26 +2994,39 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, } if (wowlan_info_ver < 2) { - struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; + struct iwl_wowlan_info_notif_v1 *notif_v1 = + (void *)pkt->data; + struct iwl_wowlan_info_notif_v2 *notif_v2; - notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); - if (!notif) + notif_v2 = kmemdup(notif_v1, sizeof(*notif_v2), GFP_ATOMIC); + + if (!notif_v2) return false; - notif->tid_tear_down = notif_v1->tid_tear_down; - notif->station_id = notif_v1->station_id; - memset_after(notif, 0, station_id); + notif_v2->tid_tear_down = notif_v1->tid_tear_down; + notif_v2->station_id = notif_v1->station_id; + memset_after(notif_v2, 0, station_id); + iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, + d3_data->status, + len); + kfree(notif_v2); + + } else if (wowlan_info_ver == 2) { + struct iwl_wowlan_info_notif_v2 *notif_v2 = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, + d3_data->status, + len); } else { - notif = (void *)pkt->data; + struct iwl_wowlan_info_notif *notif = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif(mvm, notif, + d3_data->status, len); } d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; - len = iwl_rx_packet_payload_len(pkt); - iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status, - len); - - if (wowlan_info_ver < 2) - kfree(notif); if (d3_data->status && d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index cb4ecad610..e8b881596b 
100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -699,19 +699,11 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); - -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct dentry *dbgfs_dir = vif->debugfs_dir; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[100]; - - /* - * Check if debugfs directory already exist before creating it. - * This may happen when, for example, resetting hw or suspend-resume - */ - if (!dbgfs_dir || mvmvif->dbgfs_dir) - return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { @@ -737,6 +729,17 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); +} + +void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct dentry *dbgfs_dir = vif->debugfs_dir; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[100]; + + /* this will happen in monitor mode */ + if (!dbgfs_dir) + return; /* * Create symlink for convenience pointing to interface specific @@ -745,21 +748,62 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * find * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ */ - snprintf(buf, 100, "../../../%pd3/%pd", - dbgfs_dir, - mvmvif->dbgfs_dir); + snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir); mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf); } -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); debugfs_remove(mvmvif->dbgfs_slink); mvmvif->dbgfs_slink = NULL; +} + +#define MVM_DEBUGFS_WRITE_LINK_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, \ + struct ieee80211_bss_conf) +#define MVM_DEBUGFS_READ_WRITE_LINK_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(link_##name, bufsz, \ + struct ieee80211_bss_conf) +#define MVM_DEBUGFS_ADD_LINK_FILE(name, parent, mode) \ + debugfs_create_file(#name, mode, parent, link_conf, \ + &iwl_dbgfs_link_##name##_ops) + +static void iwl_mvm_debugfs_add_link_files(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *mvm_dir) +{ + /* Add per-link files here*/ +} + +void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct dentry *mvm_dir; + + if (WARN_ON(!link_info) || !dir) + return; + + if (dir == vif->debugfs_dir) { + WARN_ON(!mvmvif->dbgfs_dir); + mvm_dir = mvmvif->dbgfs_dir; + } else { + mvm_dir = debugfs_create_dir("iwlmvm", dir); + if (IS_ERR_OR_NULL(mvm_dir)) { + IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", + dir); + return; + } + } - 
debugfs_remove_recursive(mvmvif->dbgfs_dir); - mvmvif->dbgfs_dir = NULL; + iwl_mvm_debugfs_add_link_files(vif, link_conf, mvm_dir); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 7057421e51..7737650e56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -50,8 +50,18 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; + bool force; - if (!iwl_mvm_is_ctdp_supported(mvm)) + if (!kstrtobool(buf, &force)) + IWL_DEBUG_INFO(mvm, + "force start is %d [0=disabled, 1=enabled]\n", + force); + + /* we allow skipping cap support check and force stop ctdp + * statistics collection and with guerantee that it is + * safe to use. + */ + if (!force && !iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || @@ -65,6 +75,36 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int ret; + bool force; + + if (!kstrtobool(buf, &force)) + IWL_DEBUG_INFO(mvm, + "force start is %d [0=disabled, 1=enabled]\n", + force); + + /* we allow skipping cap support check and force enable ctdp + * for statistics collection and with guerantee that it is + * safe to use. + */ + if (!force && !iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -965,6 +1005,13 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char *buf; int ret; size_t bufsz; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return -EOPNOTSUPP; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / @@ -1144,6 +1191,101 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, } #undef PRINT_STAT_LE32 +static ssize_t iwl_dbgfs_fw_system_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff, *pos, *endpos; + int ret; + size_t bufsz; + int i; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + struct iwl_mvm *mvm = file->private_data; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + /* in case of a wrong cmd version, allocate buffer only for error msg */ + bufsz = (cmd_ver == 1) ? 
4096 : 64; + + buff = kzalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + pos = buff; + endpos = pos + bufsz; + + if (cmd_ver != 1) { + pos += scnprintf(pos, endpos - pos, + "System stats not supported:%d\n", cmd_ver); + goto send_out; + } + + mutex_lock(&mvm->mutex); + if (iwl_mvm_firmware_running(mvm)) + iwl_mvm_request_statistics(mvm, false); + + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); + if (!vif) + continue; + + if (vif->type == NL80211_IFTYPE_STATION) + break; + } + + if (i == NUM_MAC_INDEX_DRIVER || !vif) { + pos += scnprintf(pos, endpos - pos, "vif is NULL\n"); + goto release_send_out; + } + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (!mvmvif) { + pos += scnprintf(pos, endpos - pos, "mvmvif is NULL\n"); + goto release_send_out; + } + + for_each_mvm_vif_valid_link(mvmvif, i) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[i]; + + pos += scnprintf(pos, endpos - pos, + "link_id %d", i); + pos += scnprintf(pos, endpos - pos, + " num_beacons %d", + link_info->beacon_stats.num_beacons); + pos += scnprintf(pos, endpos - pos, + " accu_num_beacons %d", + link_info->beacon_stats.accu_num_beacons); + pos += scnprintf(pos, endpos - pos, + " avg_signal %d\n", + link_info->beacon_stats.avg_signal); + } + + pos += scnprintf(pos, endpos - pos, + "radio_stats.rx_time %lld\n", + mvm->radio_stats.rx_time); + pos += scnprintf(pos, endpos - pos, + "radio_stats.tx_time %lld\n", + mvm->radio_stats.tx_time); + pos += scnprintf(pos, endpos - pos, + "accu_radio_stats.rx_time %lld\n", + mvm->accu_radio_stats.rx_time); + pos += scnprintf(pos, endpos - pos, + "accu_radio_stats.tx_time %lld\n", + mvm->accu_radio_stats.tx_time); + +release_send_out: + mutex_unlock(&mvm->mutex); + +send_out: + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + + return ret; +} + static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, @@ -1998,6 +2140,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); @@ -2012,6 +2155,7 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); +MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); @@ -2210,6 +2354,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); @@ -2218,6 +2363,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, 
mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h index 0711ab689c..cc2c45b45d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * Copyright (C) 2023 Intel Corporation * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1d5ee4330f..403bd17b8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -15,6 +15,7 @@ #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" +#include "fw/uefi.h" #include "mvm.h" #include "fw/dbg.h" @@ -23,12 +24,15 @@ #include "iwl-nvm-parse.h" #include "time-sync.h" -#define MVM_UCODE_ALIVE_TIMEOUT (HZ) +#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) #define IWL_TAS_US_MCC 0x5553 #define IWL_TAS_CANADA_MCC 0x4341 +#define IWL_UATS_VLP_AP_SUPPORTED BIT(29) +#define IWL_UATS_AFC_AP_SUPPORTED BIT(30) + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -487,6 +491,52 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, } #if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static void iwl_mvm_uats_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + UATS_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.uats_table, + .len[0] = sizeof(mvm->fwrt.uats_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!(mvm->trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210)) { + IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); + return; + } + + if (!mvm->fwrt.uats_enabled) { + IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n"); + return; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver != 1) { + IWL_DEBUG_RADIO(mvm, + "UATS_TABLE_CMD ver %d not supported\n", + cmd_ver); + return; + } + + ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); + if (ret < 0) { + IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret); + return; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret); + else + IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n"); +} + static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { u8 cmd_ver; @@ -526,6 +576,10 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { return 0; } + +static void iwl_mvm_uats_init(struct iwl_mvm *mvm) +{ +} #endif static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) @@ -583,6 +637,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; + u32 sb_cfg; int ret; if (mvm->trans->cfg->tx_with_siso_diversity) @@ -592,6 +647,14 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) mvm->rfkill_safe_init_done = false; + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); + /* if needed, we'll reset this on our way out later */ + mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM; + if (mvm->pldr_sync && iwl_mei_pldr_req()) + return -EBUSY; + } + 
iwl_init_notification_wait(&mvm->notif_wait, &init_wait, init_complete, @@ -605,6 +668,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + + /* if we needed reset then fail here, but notify and remove */ + if (mvm->pldr_sync) { + iwl_mei_alive_notif(false); + iwl_trans_pcie_remove(mvm->trans, true); + } + goto error; } iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, @@ -667,7 +737,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { - mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); + mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw, + mvm->set_tx_ant, mvm->set_rx_ant); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; @@ -1084,6 +1155,12 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), }, }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, { .ident = "MSI", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), @@ -1209,7 +1286,10 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; - struct iwl_lari_config_change_cmd_v6 cmd = {}; + struct iwl_lari_config_change_cmd_v7 cmd = {}; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), 1); cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); @@ -1227,8 +1307,11 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ACTIVATE_CHANNEL, &iwl_guid, &value); - if (!ret) + if (!ret) { + if (cmd_ver < 8) + value &= ~ACTIVATE_5G2_IN_WW_MASK; cmd.chan_state_active_bitmap = cpu_to_le32(value); + } ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_6E, @@ -1242,18 +1325,26 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) if (!ret) cmd.force_disable_channels_bitmap = cpu_to_le32(value); + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD, + &iwl_guid, &value); + if (!ret) + cmd.edt_bitmap = cpu_to_le32(value); + if (cmd.config_bitmap || cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || cmd.oem_unii4_allow_bitmap || cmd.chan_state_active_bitmap || - cmd.force_disable_channels_bitmap) { + cmd.force_disable_channels_bitmap || + cmd.edt_bitmap) { size_t cmd_size; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE), - 1); + switch (cmd_ver) { + case 8: + case 7: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); + break; case 6: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); break; @@ -1287,6 +1378,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", le32_to_cpu(cmd.oem_uhb_allow_bitmap), le32_to_cpu(cmd.force_disable_channels_bitmap)); + IWL_DEBUG_RADIO(mvm, + "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n", + le32_to_cpu(cmd.edt_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), @@ -1296,6 +1390,10 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } + + if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED || + 
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED) + mvm->fwrt.uats_enabled = TRUE; } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) @@ -1499,10 +1597,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband = NULL; - u32 sb_cfg; lockdep_assert_held(&mvm->mutex); @@ -1510,11 +1605,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) return ret; - sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); - mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK); - if (mvm->pldr_sync && iwl_mei_pldr_req()) - return -EBUSY; - ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); @@ -1527,6 +1617,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* FW loaded successfully */ mvm->pldr_sync = false; + iwl_fw_disable_dbg_asserts(&mvm->fwrt); iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); @@ -1630,21 +1721,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - chan = &sband->channels[0]; - - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - for (i = 0; i < NUM_PHY_CTX; i++) { - /* - * The channel used here isn't relevant as it's - * going to be overwritten in the other flows. - * For now use the first channel we have. - */ - ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], - &chandef, 1, 1); - if (ret) - goto error; - } - if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting @@ -1727,6 +1803,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); + iwl_mvm_uats_init(mvm); if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 4ab55a1fcb..be48b0fc9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -242,9 +242,10 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_link_config_cmd cmd = {}; int ret; + /* mac80211 thought we have the link, but it was never configured */ if (WARN_ON(!link_info || link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) - return -EINVAL; + return 0; RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], NULL); @@ -252,6 +253,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id); link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; cmd.spec_link_id = link_conf->link_id; + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 9c97691e60..c4f96125cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1083,6 +1083,19 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, sizeof(beacon_cmd)); } +bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx) +{ + if (IWL_MVM_DISABLE_AP_FILS) + return false; + + if (cfg80211_channel_is_psc(ctx->def.chan)) + return true; + + return (ctx->def.chan->band == NL80211_BAND_6GHZ && + ctx->def.width >= NL80211_CHAN_WIDTH_80); +} + static int 
iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, @@ -1102,8 +1115,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, ctx = rcu_dereference(link_conf->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); - if (cfg80211_channel_is_psc(ctx->def.chan) && - !IWL_MVM_DISABLE_AP_FILS) { + if (iwl_mvm_enable_fils(mvm, ctx)) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? IWL_MAC_BEACON_FILS : @@ -1761,6 +1773,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, u32 id_n_color, csa_id; /* save mac_id or link_id to use later to cancel csa if needed */ u32 id; + u32 mac_link_id = 0; u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, 0); bool csa_active; @@ -1790,6 +1803,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, goto out_unlock; id = link_id; + mac_link_id = bss_conf->link_id; vif = bss_conf->vif; csa_active = bss_conf->csa_active; } @@ -1839,7 +1853,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); - ieee80211_chswitch_done(vif, true); + ieee80211_chswitch_done(vif, true, mac_link_id); break; default: /* should never happen */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a25ea63822..350dc6b8a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -186,7 +186,7 @@ struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, MCC_SOURCE_OLD_FW, changed); } -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) +int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; @@ -213,8 +213,10 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) if (IS_ERR_OR_NULL(regd)) return -EIO; - /* update cfg80211 if the regdomain was changed */ - if (changed) + /* update cfg80211 if the regdomain was changed or the caller explicitly + * asked to update regdomain + */ + if (changed || force_regd_sync) ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); else ret = 0; @@ -279,6 +281,30 @@ int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } +int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* This has been tested on those devices only */ + if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 && + mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) + return -ENOTSUPP; + + if (!mvm->nvm_data) + return -EBUSY; + + /* mac80211 ensures the device is not started, + * so the firmware cannot be running + */ + + mvm->set_tx_ant = tx_ant; + mvm->set_rx_ant = rx_ant; + + iwl_reinit_cab(mvm->trans, mvm->nvm_data, tx_ant, rx_ant, mvm->fw); + + return 0; +} + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -352,7 +378,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, HAS_RATE_CONTROL); } - if (iwl_mvm_has_new_rx_api(mvm)) + /* We want to use the mac80211's reorder buffer for 9000 */ + if (iwl_mvm_has_new_rx_api(mvm) && + mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, @@ 
-498,7 +526,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ARRAY_SIZE(iwl_mvm_iface_combinations); hw->wiphy->max_remain_on_channel_duration = 10000; - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + hw->max_listen_interval = IWL_MVM_CONN_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); @@ -1033,6 +1061,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, spin_unlock_bh(&mvm->time_event_lock); memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + mvmvif->ap_sta = NULL; for_each_mvm_vif_valid_link(mvmvif, link_id) { mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; @@ -1169,18 +1198,8 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw) for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); - if (!ret) - break; - - /* - * In PLDR sync PCI re-enumeration is needed. no point to retry - * mac start before that. - */ - if (mvm->pldr_sync) { - iwl_mei_alive_notif(false); - iwl_trans_pcie_remove(mvm->trans, true); + if (!ret || mvm->pldr_sync) break; - } IWL_ERR(mvm, "mac start retry %d\n", retry); } @@ -1370,7 +1389,8 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1380,10 +1400,11 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; + unsigned int link_id = link_conf->link_id; + u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id; mvmvif->csa_bcn_pending = false; - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->deflink.ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; @@ -1452,7 +1473,8 @@ void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); - iwl_mvm_post_channel_switch(hw, vif); + /* If we're here, we can't support MLD */ + iwl_mvm_post_channel_switch(hw, vif, &vif->bss_conf); } void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) @@ -1464,7 +1486,7 @@ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); /* Trigger disconnect (should clear the CSA state) */ - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); } static u8 @@ -1517,6 +1539,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; mutex_lock(&mvm->mutex); @@ -1533,8 +1556,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->deflink.beacon_stats.accu_num_beacons += - mvmvif->deflink.beacon_stats.num_beacons; + for_each_mvm_vif_valid_link(mvmvif, i) + mvmvif->link[i]->beacon_stats.accu_num_beacons += + mvmvif->link[i]->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); @@ -1562,7 +1586,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - 
iwl_mvm_vif_dbgfs_register(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); ret = 0; goto out; } @@ -1602,7 +1627,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); } - iwl_mvm_vif_dbgfs_register(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -1619,11 +1645,6 @@ out: goto out_unlock; - if (mvm->bf_allowed_vif == mvmvif) { - mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_CQM_RSSI); - } out_remove_mac: mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); @@ -1685,7 +1706,7 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); - iwl_mvm_vif_dbgfs_clean(mvm, vif); + iwl_mvm_vif_dbgfs_rm_link(mvm, vif); /* * For AP/GO interface, the tear down of the resources allocated to the @@ -2449,7 +2470,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, } void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration_override) + u32 duration_override, unsigned int link_id) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; @@ -2469,7 +2490,8 @@ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, - min_duration, false); + min_duration, false, + link_id); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); @@ -2563,6 +2585,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; /* * Re-calculate the tsf id, as the leader-follower relations depend @@ -2609,8 +2632,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (vif->cfg.assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); - memset(&mvmvif->deflink.beacon_stats, 0, - sizeof(mvmvif->deflink.beacon_stats)); + for_each_mvm_vif_valid_link(mvmvif, i) + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); @@ -2657,7 +2681,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * time could be small without us having heard * a beacon yet. 
*/ - iwl_mvm_protect_assoc(mvm, vif, 0); + iwl_mvm_protect_assoc(mvm, vif, 0, 0); } iwl_mvm_sf_update(mvm, vif, false); @@ -3023,22 +3047,6 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) -{ - static const struct iwl_mvm_bss_info_changed_ops callbacks = { - .bss_info_changed_sta = iwl_mvm_bss_info_changed_station, - .bss_info_changed_ap_ibss = iwl_mvm_bss_info_changed_ap_ibss, - }; - - iwl_mvm_bss_info_changed_common(hw, vif, bss_conf, &callbacks, - changes); -} - -void -iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - const struct iwl_mvm_bss_info_changed_ops *callbacks, - u64 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3049,12 +3057,11 @@ iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - callbacks->bss_info_changed_sta(mvm, vif, bss_conf, changes); + iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - callbacks->bss_info_changed_ap_ibss(mvm, vif, bss_conf, - changes); + iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) @@ -3666,6 +3673,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, NL80211_TDLS_SETUP); } + if (ret) + return ret; + for_each_sta_active_link(vif, sta, link_sta, i) link_sta->agg.max_rc_amsdu_len = 1; @@ -3761,6 +3771,13 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, callbacks->mac_ctxt_changed(mvm, vif, false); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + + /* when client is authorized (AP station marked as such), + * try to enable more links + */ + if (vif->type == NL80211_IFTYPE_STATION && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_mld_select_links(mvm, vif, false); } mvm_sta->authorized = true; @@ -3860,9 +3877,14 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - /* this would be a mac80211 bug ... but don't crash */ + /* this would be a mac80211 bug ... but don't crash, unless we had a + * firmware crash while we were activating a link, in which case it is + * legit to have phy_ctxt = NULL. Don't bother not to WARN if we are in + * recovery flow since we spit tons of error messages anyway. + */ for_each_sta_active_link(vif, sta, link_sta, link_id) { - if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) { + if (WARN_ON_ONCE(!mvmvif->link[link_id] || + !mvmvif->link[link_id]->phy_ctxt)) { mutex_unlock(&mvm->mutex); return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; @@ -4001,7 +4023,7 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - iwl_mvm_protect_assoc(mvm, vif, info->duration); + iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id); mutex_unlock(&mvm->mutex); } @@ -4138,12 +4160,21 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. - * CMAC/GMAC in AP/IBSS modes must be done in software. + * CMAC/GMAC in AP/IBSS modes must be done in software + * on older NICs. * * Except, of course, beacon protection - it must be - * offloaded since we just set a beacon template. 
+ * offloaded since we just set a beacon template, and + * then we must also offload the IGTK (not just BIGTK) + * for firmware reasons. + * + * So just check for beacon protection - if we don't + * have it we cannot get here with keyidx >= 6, and + * if we do have it we need to send the key to FW in + * all cases (CMAC/GMAC). */ - if (keyidx < 6 && + if (!wiphy_ext_feature_isset(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION) && (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) { @@ -4372,6 +4403,39 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) + +static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, + u32 duration_ms, + u32 *duration_tu, + u32 *delay) +{ + u32 dtim_interval = vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int; + + *delay = AUX_ROC_MIN_DELAY; + *duration_tu = MSEC_TO_TU(duration_ms); + + /* + * If we are associated we want the delay time to be at least one + * dtim interval so that the FW can wait until after the DTIM and + * then start the time event, this will potentially allow us to + * remain off-channel for the max duration. + * Since we want to use almost a whole dtim interval we would also + * like the delay to be for 2-3 dtim intervals, in case there are + * other time events with higher priority. + */ + if (vif->cfg.assoc) { + *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); + /* We cannot remain off-channel longer than the DTIM interval */ + if (dtim_interval <= *duration_tu) { + *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER; + if (*duration_tu <= AUX_ROC_MIN_DURATION) + *duration_tu = dtim_interval - + AUX_ROC_MIN_SAFETY_BUFFER; + } + } +} + static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, @@ -4382,8 +4446,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; - u32 dtim_interval = vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), @@ -4404,29 +4466,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); - delay = AUX_ROC_MIN_DELAY; - req_dur = MSEC_TO_TU(duration); - - /* - * If we are associated we want the delay time to be at least one - * dtim interval so that the FW can wait until after the DTIM and - * then start the time event, this will potentially allow us to - * remain off-channel for the max duration. - * Since we want to use almost a whole dtim interval we would also - * like the delay to be for 2-3 dtim intervals, in case there are - * other time events with higher priority. 
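A worked example of the new iwl_mvm_roc_duration_and_delay() helper above, with assumed values: beacon_int = 100 TU, dtim_period = 2, and a 600 ms remain-on-channel request while associated. With 1 TU = 1024 us, MSEC_TO_TU(600) works out to about 585 TU:

    dtim_interval = 2 * 100;                                  /* 200 TU */
    duration_tu   = MSEC_TO_TU(600);                          /* 585 TU requested */
    delay         = min(3 * dtim_interval,                    /* 600 TU */
                        AUX_ROC_MAX_DELAY);                   /* MSEC_TO_TU(600) = 585 TU */
    /* the request does not fit inside one DTIM interval, so clamp it */
    duration_tu   = dtim_interval - AUX_ROC_SAFETY_BUFFER;    /* 200 - 19 = 181 TU */

Assuming 181 TU is still above AUX_ROC_MIN_DURATION (defined earlier in this file), no further reduction to dtim_interval - AUX_ROC_MIN_SAFETY_BUFFER is needed.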
- */ - if (vif->cfg.assoc) { - delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); - /* We cannot remain off-channel longer than the DTIM interval */ - if (dtim_interval <= req_dur) { - req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; - if (req_dur <= AUX_ROC_MIN_DURATION) - req_dur = dtim_interval - - AUX_ROC_MIN_SAFETY_BUFFER; - } - } - + iwl_mvm_roc_duration_and_delay(vif, duration, &req_dur, &delay); tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); @@ -4434,8 +4474,8 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, "ROC: Requesting to remain on channel %u for %ums\n", channel->hw_value, req_dur); IWL_DEBUG_TE(mvm, - "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", - duration, delay, dtim_interval); + "\t(requested = %ums, max_delay = %ums)\n", + duration, delay); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); @@ -4493,6 +4533,48 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, return res; } +static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, u32 activity) +{ + int res; + u32 duration_tu, delay; + struct iwl_roc_req roc_req = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .activity = cpu_to_le32(activity), + .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), + }; + + lockdep_assert_held(&mvm->mutex); + + /* Set the channel info data */ + iwl_mvm_set_chan_info(mvm, &roc_req.channel_info, + channel->hw_value, + iwl_mvm_phy_band_from_nl80211(channel->band), + IWL_PHY_CHANNEL_MODE20, 0); + + iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu, + &delay); + roc_req.duration = cpu_to_le32(duration_tu); + roc_req.max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, + "\t(requested = %ums, max_delay = %ums)\n", + duration, delay); + IWL_DEBUG_TE(mvm, + "Requesting to remain on channel %u for %utu\n", + channel->hw_value, duration_tu); + + /* Set the node address */ + memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); + + res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + 0, sizeof(roc_req), &roc_req); + + return res; +} + static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) { int ret = 0; @@ -4543,6 +4625,75 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); } +static int iwl_mvm_roc_station(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration) +{ + int ret; + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) { + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); + } else if (fw_ver == 3) { + ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, + ROC_ACTIVITY_HOTSPOT); + } else { + ret = -EOPNOTSUPP; + IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); + } + + return ret; +} + +static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel *channel) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct cfg80211_chan_def chandef; + int i; + + lockdep_assert_held(&mvm->mutex); + + if (mvmvif->deflink.phy_ctxt && + channel == mvmvif->deflink.phy_ctxt->channel) + return 0; + + /* Try using a PHY context that is already in use */ + for (i = 0; i < NUM_PHY_CTX; i++) { + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[i]; + + if (!phy_ctxt->ref || 
mvmvif->deflink.phy_ctxt == phy_ctxt) + continue; + + if (channel == phy_ctxt->channel) { + if (mvmvif->deflink.phy_ctxt) + iwl_mvm_phy_ctxt_unref(mvm, + mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = phy_ctxt; + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + return 0; + } + } + + /* We already have a phy_ctxt, but it's not on the right channel */ + if (mvmvif->deflink.phy_ctxt) + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) + return -ENOSPC; + + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + + return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, + &chandef, 1, 1); +} + /* Execute the common part for MLD and non-MLD modes */ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, @@ -4550,11 +4701,8 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct cfg80211_chan_def chandef; - struct iwl_mvm_phy_ctxt *phy_ctxt; - int ret, i; u32 lmac_id; + int ret; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); @@ -4574,8 +4722,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* Use aux roc framework (HS20) */ ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); if (!ret) - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, - vif, duration); + ret = iwl_mvm_roc_station(mvm, channel, vif, duration); goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ @@ -4586,51 +4733,11 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out_unlock; } - /* Try using a PHY context that is already in use */ - for (i = 0; i < NUM_PHY_CTX; i++) { - phy_ctxt = &mvm->phy_ctxts[i]; - if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt) - continue; - - if (channel == phy_ctxt->channel) { - if (mvmvif->deflink.phy_ctxt) - iwl_mvm_phy_ctxt_unref(mvm, - mvmvif->deflink.phy_ctxt); - - mvmvif->deflink.phy_ctxt = phy_ctxt; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - goto link_and_start_p2p_roc; - } - } - - /* If the currently used PHY context is configured with a matching - * channel use it - */ - if (mvmvif->deflink.phy_ctxt) { - if (channel == mvmvif->deflink.phy_ctxt->channel) - goto link_and_start_p2p_roc; - } else { - phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!phy_ctxt) { - ret = -ENOSPC; - goto out_unlock; - } - - mvmvif->deflink.phy_ctxt = phy_ctxt; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - } - - /* Configure the PHY context */ - cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, - 1, 1); - if (ret) { - IWL_ERR(mvm, "Failed to change PHY context\n"); + ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel); + if (ret) goto out_unlock; - } -link_and_start_p2p_roc: ret = ops->link(mvm, vif); if (ret) goto out_unlock; @@ -4692,8 +4799,9 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; - bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); - struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; + bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || + iwl_mvm_enable_fils(mvm, ctx); + struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); @@ -4706,15 +4814,14 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, goto out; } - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); + ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } - iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; @@ -4760,8 +4867,9 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); - struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; + bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || + iwl_mvm_enable_fils(mvm, ctx); + struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | @@ -4900,7 +5008,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { - u32 duration = 3 * vif->bss_conf.beacon_int; + u32 duration = 5 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. @@ -5178,8 +5286,8 @@ int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) return mvm->ibss_manager; } -int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - bool set) +static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -5406,7 +5514,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, goto out_unlock; } - if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) + if (chsw->delay > IWL_MAX_CSA_BLOCK_TX && + hweight16(vif->valid_links) <= 1) schedule_delayed_work(&mvmvif->csa_work, 0); if (chsw->block_tx) { @@ -5485,7 +5594,7 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, if (mvmvif->csa_misbehave) { /* Second time, give up on this AP*/ iwl_mvm_abort_channel_switch(hw, vif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); mvmvif->csa_misbehave = false; return; } @@ -5626,7 +5735,11 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; + int ret = 0; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); memset(survey, 0, sizeof(*survey)); @@ -5646,13 +5759,8 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, goto out; } - survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_RX | - SURVEY_INFO_TIME_TX | - SURVEY_INFO_TIME_SCAN; - survey->time = mvm->accu_radio_stats.on_time_rf + - mvm->radio_stats.on_time_rf; - do_div(survey->time, USEC_PER_MSEC); + survey->filled = SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX; survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; @@ -5662,11 +5770,20 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 
mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); + /* the new fw api doesn't support the following fields */ + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + goto out; + + survey->filled |= SURVEY_INFO_TIME | + SURVEY_INFO_TIME_SCAN; + survey->time = mvm->accu_radio_stats.on_time_rf + + mvm->radio_stats.on_time_rf; + do_div(survey->time, USEC_PER_MSEC); + survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); - ret = 0; out: mutex_unlock(&mvm->mutex); return ret; @@ -5815,6 +5932,7 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int i; if (mvmsta->deflink.avg_energy) { sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy; @@ -5843,8 +5961,11 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (iwl_mvm_request_statistics(mvm, false)) goto unlock; - sinfo->rx_beacon = mvmvif->deflink.beacon_stats.num_beacons + - mvmvif->deflink.beacon_stats.accu_num_beacons; + sinfo->rx_beacon = 0; + for_each_mvm_vif_valid_link(mvmvif, i) + sinfo->rx_beacon += mvmvif->link[i]->beacon_stats.num_beacons + + mvmvif->link[i]->beacon_stats.accu_num_beacons; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->deflink.beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ @@ -6150,6 +6271,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, + .set_antenna = iwl_mvm_op_set_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, @@ -6232,6 +6354,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS + .vif_add_debugfs = iwl_mvm_vif_add_debugfs, .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif .set_hw_timestamp = iwl_mvm_set_hw_timestamp, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 1e58f02342..893b69fc84 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -10,6 +10,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; mutex_lock(&mvm->mutex); @@ -22,8 +23,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->deflink.beacon_stats.accu_num_beacons += - mvmvif->deflink.beacon_stats.num_beacons; + for_each_mvm_vif_valid_link(mvmvif, i) + mvmvif->link[i]->beacon_stats.accu_num_beacons += + mvmvif->link[i]->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); @@ -79,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); } - iwl_mvm_vif_dbgfs_register(mvm, vif); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == 
NL80211_IFTYPE_STATION && !vif->p2p && @@ -135,7 +138,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); - iwl_mvm_vif_dbgfs_clean(mvm, vif); + iwl_mvm_vif_dbgfs_rm_link(mvm, vif); /* For AP/GO interface, the tear down of the resources allocated to the * interface is be handled as part of the stop_ap flow. @@ -211,8 +214,8 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, mvmvif->esr_active = true; - /* Disable SMPS overrideing by user */ - vif->driver_flags |= IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + /* Indicate to mac80211 that EML is enabled */ + vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE; iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, IEEE80211_SMPS_OFF); @@ -370,7 +373,7 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, mvmvif->esr_active = false; - vif->driver_flags &= ~IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, IEEE80211_SMPS_AUTOMATIC); @@ -435,6 +438,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = false; } + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { int ret = iwl_mvm_esr_mode_inactive(mvm, vif); @@ -446,9 +452,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, if (vif->type == NL80211_IFTYPE_MONITOR) iwl_mvm_mld_rm_snif_sta(mvm, vif); - iwl_mvm_link_changed(mvm, vif, link_conf, - LINK_CONTEXT_MODIFY_ACTIVE, false); - if (switching_chanctx) return; mvmvif->link[link_id]->phy_ctxt = NULL; @@ -460,10 +463,17 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); + /* in the non-MLD case, remove/re-add the link to clean up FW state */ + if (!ieee80211_vif_is_mld(vif) && !mvmvif->ap_sta && + !WARN_ON_ONCE(vif->cfg.assoc)) { + iwl_mvm_remove_link(mvm, vif, link_conf); + iwl_mvm_add_link(mvm, vif, link_conf); + } mutex_unlock(&mvm->mutex); } @@ -594,6 +604,126 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, &callbacks); } +struct iwl_mvm_link_sel_data { + u8 link_id; + enum nl80211_band band; + bool active; +}; + +static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a, + struct iwl_mvm_link_sel_data *b) +{ + return a->band != b->band; +} + +void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool valid_links_changed) +{ + struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; + unsigned long usable_links = ieee80211_vif_usable_links(vif); + u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); + u16 new_active_links; + u8 link_id, n_data = 0, i, j; + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + if (!ieee80211_vif_is_mld(vif) || usable_links == 1) + return; + + /* The logic below is a simple version that doesn't suit more than 2 + * links + */ + WARN_ON_ONCE(max_active_links > 2); + + /* if only a single active link is supported, assume that the one + * selected by higher layer for connection establishment is the best. 
+ */ + if (max_active_links == 1 && !valid_links_changed) + return; + + /* If we are already using the maximal number of active links, don't do + * any change. This can later be optimized to pick a 'better' link pair. + */ + if (hweight16(vif->active_links) == max_active_links) + return; + + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].band = link_conf->chandef.chan->band; + data[n_data].active = vif->active_links & BIT(link_id); + n_data++; + } + + rcu_read_unlock(); + + /* this is expected to be the current active link */ + if (n_data == 1) + return; + + new_active_links = 0; + + /* Assume that after association only a single link is active, thus, + * select only the 2nd link + */ + if (!valid_links_changed) { + for (i = 0; i < n_data; i++) { + if (data[i].active) + break; + } + + if (WARN_ON_ONCE(i == n_data)) + return; + + for (j = 0; j < n_data; j++) { + if (i == j) + continue; + + if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j])) + break; + } + + if (j != n_data) + new_active_links = BIT(data[i].link_id) | + BIT(data[j].link_id); + } else { + /* Try to find a valid link pair for EMLSR operation. If a pair + * is not found continue using the current active link. + */ + for (i = 0; i < n_data; i++) { + for (j = 0; j < n_data; j++) { + if (i == j) + continue; + + if (iwl_mvm_mld_valid_link_pair(&data[i], + &data[j])) + break; + } + + /* found a valid pair for EMLSR, use it */ + if (j != n_data) { + new_active_links = BIT(data[i].link_id) | + BIT(data[j].link_id); + break; + } + } + } + + if (!new_active_links) + return; + + if (vif->active_links != new_active_links) + ieee80211_set_active_links_async(vif, new_active_links); +} + static void iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -638,6 +768,9 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + if (changes & BSS_CHANGED_MLD_VALID_LINKS) + iwl_mvm_mld_select_links(mvm, vif, true); + memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, ETH_ALEN); @@ -728,6 +861,12 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && protect) { + /* We are in assoc so only one link is active- + * The association link + */ + unsigned int link_id = + ffs(vif->active_links) - 1; + /* If we're not restarting and still haven't * heard a beacon (dtim period unknown) then * make sure we still have enough minimum time @@ -737,7 +876,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, * time could be small without us having heard * a beacon yet. 
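To make the link-selection logic in iwl_mvm_mld_select_links() above concrete, consider an assumed setup: IWL_MVM_AUTO_EML_ENABLE is set, the station is MLD with usable links 0 (2.4 GHz) and 1 (5 GHz), max_active_links is 2, and only link 0 is active right after association (valid_links_changed is false). Both links are collected into data[], link 0 is found as the active one, and iwl_mvm_mld_valid_link_pair() accepts the 0/1 pair because the bands differ, so:

    new_active_links = BIT(0) | BIT(1);                     /* 0x3 */
    ieee80211_set_active_links_async(vif, new_active_links);

Had both usable links been on the same band, the pair would have been rejected and the currently active link kept unchanged.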
*/ - iwl_mvm_protect_assoc(mvm, vif, 0); + iwl_mvm_protect_assoc(mvm, vif, 0, link_id); } iwl_mvm_sf_update(mvm, vif, false); @@ -1090,6 +1229,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, + .set_antenna = iwl_mvm_op_set_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, @@ -1136,8 +1276,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx_last_beacon = iwl_mvm_tx_last_beacon, - .set_tim = iwl_mvm_set_tim, - .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, @@ -1172,6 +1310,8 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .abort_pmsr = iwl_mvm_abort_pmsr, #ifdef CONFIG_IWLWIFI_DEBUGFS + .vif_add_debugfs = iwl_mvm_vif_add_debugfs, + .link_add_debugfs = iwl_mvm_link_add_debugfs, .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif .set_hw_timestamp = iwl_mvm_set_hw_timestamp, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 1ccbe8c1ee..6af606e5da 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -697,6 +697,8 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* at this stage sta link pointers are already allocated */ ret = iwl_mvm_mld_update_sta(mvm, vif, sta); + if (ret) + goto err; for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = @@ -1106,15 +1108,26 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, link_sta_dereference_protected(sta, link_id); mvm_vif_link = mvm_vif->link[link_id]; - if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta || - mvm_sta->link[link_id])) { + if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) { ret = -EINVAL; goto err; } - ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); - if (WARN_ON(ret)) - goto err; + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (WARN_ON(!mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + } else { + if (WARN_ON(mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, + link_id); + if (WARN_ON(ret)) + goto err; + } link_sta->agg.max_rc_amsdu_len = 1; ieee80211_sta_recalc_aggregates(sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 218f3bc311..f2af3e5714 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -121,15 +121,16 @@ struct iwl_mvm_time_event_data { * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; + u8 link_id; }; /* Power management */ /** * enum iwl_power_scheme - * @IWL_POWER_LEVEL_CAM - Continuously Active Mode - * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) - * @IWL_POWER_LEVEL_LP - Low Power + * @IWL_POWER_SCHEME_CAM: Continuously Active Mode + * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default) + * @IWL_POWER_SCHEME_LP: Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, @@ -137,7 +138,6 @@ enum iwl_power_scheme { IWL_POWER_SCHEME_LP }; -#define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -218,7 +218,7 @@ enum iwl_bt_force_ant_mode { }; /** - * struct 
iwl_mvm_low_latency_force - low latency force mode set by debugfs + * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: for low latency on * @LOW_LATENCY_FORCE_OFF: for low latency off @@ -232,7 +232,7 @@ enum iwl_mvm_low_latency_force { }; /** -* struct iwl_mvm_low_latency_cause - low latency set causes +* enum iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command @@ -302,7 +302,11 @@ struct iwl_probe_resp_data { * @queue_params: QoS params for this MAC * @mgmt_queue: queue number for unbufferable management frames * @igtk: the current IGTK programmed into the firmware + * @active: indicates the link is active in FW (for sanity checking) + * @cab_queue: content-after-beacon (multicast) queue * @listen_lmac: indicates this link is allocated to the listen LMAC + * @mcast_sta: multicast station + * @phy_ctxt: phy context allocated to this link, if any */ struct iwl_mvm_vif_link_info { u8 bssid[ETH_ALEN]; @@ -342,6 +346,7 @@ struct iwl_mvm_vif_link_info { /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context + * @mvm: pointer back to the mvm struct * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @associated: indicates that we're currently associated, used only for @@ -364,6 +369,13 @@ struct iwl_mvm_vif_link_info { * @csa_failed: CSA failed to schedule time event, report an error later * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel * @features: hw features active for this vif + * @ap_beacon_time: AP beacon time for synchronisation (on older FW) + * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link) + * @bf_data: beacon filtering data + * @deflink: default link data for use in non-MLO + * @link: link data for each link in MLO + * @esr_active: indicates eSR mode is active + * @pm_enabled: indicates powersave is enabled */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -635,18 +647,9 @@ struct iwl_mvm_tcm { * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection - * @reorder_timer: timer for frames are in the reorder buffer. 
For AMSDU - * it is the time of last received sub-frame - * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context - * @consec_oldsn_drops: consecutive drops due to old SN - * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track - * when to apply old SN consecutive drop workaround - * @consec_oldsn_prev_drop: track whether or not an MPDU - * that was single/part of the previous A-MPDU was - * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; @@ -655,33 +658,21 @@ struct iwl_mvm_reorder_buffer { int queue; u16 last_amsdu; u8 last_sub_index; - struct timer_list reorder_timer; - bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; - unsigned int consec_oldsn_drops; - u32 consec_oldsn_ampdu_gp2; - unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** - * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno + * struct iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored - * @reorder_time: time the packet was stored in the reorder buffer */ -struct _iwl_mvm_reorder_buf_entry { - struct sk_buff_head frames; - unsigned long reorder_time; -}; - -/* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { - struct _iwl_mvm_reorder_buf_entry e; + struct sk_buff_head frames; } #ifndef __CHECKER__ /* sparse doesn't like this construct: "bad integer constant expression" */ -__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) +__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) #endif ; @@ -689,15 +680,17 @@ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) * struct iwl_mvm_baid_data - BA session data * @sta_mask: current station mask for the BAID * @tid: tid of the session - * @baid baid of the session + * @baid: baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout + * @rcu_ptr: BA data RCU protected access + * @rcu_head: RCU head for freeing this data * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue - * @reorder_buf_data: data + * @entries: data */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; @@ -967,6 +960,9 @@ struct iwl_mvm { u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; + u8 set_tx_ant; + u8 set_rx_ant; + /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; @@ -1198,6 +1194,8 @@ struct iwl_mvm { struct iwl_time_sync_data time_sync; struct iwl_mei_scan_filter mei_scan_filter; + + bool statistics_clear; }; /* Extract MVM priv from op_mode and _hw */ @@ -1689,6 +1687,16 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) } /* Statistics */ +void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +static inline void +iwl_mvm_handle_rx_system_end_stats_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ +} + void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet 
*pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, @@ -1702,16 +1710,29 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { - return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? - mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : - mvm->fw->valid_tx_ant; + u8 tx_ant = mvm->fw->valid_tx_ant; + + if (mvm->nvm_data && mvm->nvm_data->valid_tx_ant) + tx_ant &= mvm->nvm_data->valid_tx_ant; + + if (mvm->set_tx_ant) + tx_ant &= mvm->set_tx_ant; + + return tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { - return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? - mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : - mvm->fw->valid_rx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; + + if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant) + rx_ant &= mvm->nvm_data->valid_rx_ant; + + if (mvm->set_rx_ant) + rx_ant &= mvm->set_rx_ant; + + return rx_ant; + } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) @@ -1892,41 +1913,10 @@ void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* BSS Info */ -/** - * struct iwl_mvm_bss_info_changed_ops - callbacks for the bss_info_changed() - * - * Since the only difference between both MLD and - * non-MLD versions of bss_info_changed() is these function calls, - * each version will send its specific function calls to - * %iwl_mvm_bss_info_changed_common(). - * - * @bss_info_changed_sta: pointer to the function that handles changes - * in bss_info in sta mode - * @bss_info_changed_ap_ibss: pointer to the function that handles changes - * in bss_info in ap and ibss modes - */ -struct iwl_mvm_bss_info_changed_ops { - void (*bss_info_changed_sta)(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u64 changes); - void (*bss_info_changed_ap_ibss)(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u64 changes); -}; - -void -iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - const struct iwl_mvm_bss_info_changed_ops *callbacks, - u64 changes); -void -iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - u64 changes); +void iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes); void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u64 changes); @@ -1958,7 +1948,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif); /*Session Protection */ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration_override); + u32 duration_override, unsigned int link_id); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) @@ -2018,18 +2008,19 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm); -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif
*vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { } static inline void -iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void -iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ @@ -2262,7 +2253,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); +int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ @@ -2315,7 +2306,8 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + unsigned int link_id); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, @@ -2334,7 +2326,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size); -void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); @@ -2374,6 +2365,10 @@ void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, struct dentry *dir); +void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir); #endif /* new MLD related APIs */ @@ -2426,7 +2421,8 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) /* Channel Switch */ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); /* Channel Context */ /** @@ -2610,6 +2606,7 @@ int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); +int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int iwl_mvm_mac_start(struct ieee80211_hw *hw); void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); @@ -2681,8 +2678,6 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed); int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); -int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - bool set); void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, @@ -2724,4 +2719,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_set_hw_timestamp *hwts); int iwl_mvm_update_mu_groups(struct 
iwl_mvm *mvm, struct ieee80211_vif *vif); +bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool valid_links_changed); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index f67ab8ee18..c0dd441e80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -220,6 +220,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; + u8 tx_ant = mvm->fw->valid_tx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; int regulatory_type; /* Checking for required sections */ @@ -270,9 +272,15 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; + if (mvm->set_tx_ant) + tx_ant &= mvm->set_tx_ant; + + if (mvm->set_rx_ant) + rx_ant &= mvm->set_rx_ant; + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); + tx_ant, rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ @@ -565,7 +573,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) * try to replay the last set MCC to FW. If it doesn't exist, * queue an update to cfg80211 to retrieve the default alpha2 from FW. 
*/ - retval = iwl_mvm_init_fw_regd(mvm); + retval = iwl_mvm_init_fw_regd(mvm, true); if (retval != -ENOENT) return retval; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5336a4afde..1627b2f819 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -322,6 +322,19 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, + iwl_mvm_handle_rx_system_oper_stats, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_notif_oper), + RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, + iwl_mvm_handle_rx_system_oper_part1_stats, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_part1_notif_oper), + RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF, + iwl_mvm_handle_rx_system_end_stats_notif, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_end_notif), + RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC, struct iwl_ba_window_status_notif), @@ -426,6 +439,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, struct iwl_time_msmt_cfm_notify), + RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, + iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC, + struct iwl_roc_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -534,6 +550,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), + HCMD_NAME(SYSTEM_STATISTICS_CMD), + HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; @@ -549,6 +567,8 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(AUX_STA_CMD), HCMD_NAME(STA_REMOVE_CMD), HCMD_NAME(STA_DISABLE_TX_CMD), + HCMD_NAME(ROC_CMD), + HCMD_NAME(ROC_NOTIF), HCMD_NAME(SESSION_PROTECTION_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; @@ -586,6 +606,14 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(RX_QUEUES_NOTIFICATION), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { + HCMD_NAME(STATISTICS_OPER_NOTIF), + HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), +}; + /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ @@ -640,6 +668,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), + [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), }; /* this forward declaration can avoid to export the function */ @@ -751,7 +780,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) */ mvm->nvm_data = iwl_parse_mei_nvm_data(trans, trans->cfg, - mvm->mei_nvm_data, mvm->fw); + mvm->mei_nvm_data, + mvm->fw, + mvm->set_tx_ant, + mvm->set_rx_ant); return 0; } @@ -790,6 +822,9 @@ get_nvm_from_fw: if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* no longer need this regardless of failure or not */ + mvm->pldr_sync = false; + return ret; } @@ -1136,7 +1171,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - max_agg = IEEE80211_MAX_AMPDU_BUF_EHT; + max_agg = 512; else max_agg = IEEE80211_MAX_AMPDU_BUF_HE; @@ -1298,7 +1333,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), - "%s", fw->fw_version); + "%.31s", fw->fw_version); trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); @@ -1944,9 +1979,6 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - if (mvm->pldr_sync) - return; - if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 9c582e23eb..334d1f59f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -181,6 +181,9 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info, chains_static, chains_dynamic); + IWL_DEBUG_FW(mvm, "Send RLC command: phy=%d, rx_chain_info=0x%x\n", + ctxt->id, cmd.rlc.rx_chain_info); + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD, DATA_PATH_GROUP, 2), 0, sizeof(cmd), &cmd); @@ -254,6 +257,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { + int ret; + WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref); lockdep_assert_held(&mvm->mutex); @@ -262,9 +267,16 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD); + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_ADD); + + if (ret) + return ret; + + ctxt->ref++; + + return 0; } /* @@ -274,6 +286,11 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); + + /* If we were taking the first ref, we should have + * called iwl_mvm_phy_ctxt_add. 
+ */ + WARN_ON(!ctxt->ref); ctxt->ref++; } @@ -290,7 +307,11 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); - if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 && + if (WARN_ON_ONCE(!ctxt->ref)) + return -EINVAL; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, + RLC_CONFIG_CMD), 0) >= 2 && ctxt->channel == chandef->chan && ctxt->width == chandef->width && ctxt->center_freq1 == chandef->center_freq1) @@ -324,6 +345,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { + struct cfg80211_chan_def chandef; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!ctxt)) @@ -331,41 +353,13 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) ctxt->ref--; - /* - * Move unused phy's to a default channel. When the phy is moved the, - * fw will cleanup immediate quiet bit if it was previously set, - * otherwise we might not be able to reuse this phy. - */ - if (ctxt->ref == 0) { - struct ieee80211_channel *chan = NULL; - struct cfg80211_chan_def chandef; - struct ieee80211_supported_band *sband; - enum nl80211_band band; - int channel; - - for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { - sband = mvm->hw->wiphy->bands[band]; - - if (!sband) - continue; - - for (channel = 0; channel < sband->n_channels; channel++) - if (!(sband->channels[channel].flags & - IEEE80211_CHAN_DISABLED)) { - chan = &sband->channels[channel]; - break; - } - - if (chan) - break; - } - - if (WARN_ON(!chan)) - return; - - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); - } + if (ctxt->ref) + return; + + cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT); + + iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1, + FW_CTXT_ACTION_REMOVE); } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 9131b5f1bc..1b9b06e044 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -489,6 +489,11 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) if (mvm->ext_clock_valid) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK); + if (iwl_fw_lookup_cmd_ver(mvm->fw, POWER_TABLE_CMD, 0) >= 7 && + test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) + cmd.flags |= + cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK); + IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 1ca375a5cf..376b23b409 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,7 +3,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2003 - 2014, 2018 - 2022 Intel Corporation + * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation *****************************************************************************/ #ifndef __rs_h__ @@ -203,18 +203,12 @@ struct rs_rate { /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW - * @max_agg_bufsize: the maximal size of the AGG buffer for this station - * @sta_id: 
the id of the station -#ifdef CONFIG_MAC80211_DEBUGFS - * @dbg_fixed_rate: for debug, use fixed rate if not 0 - * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU -#endif + * @pers.sta_id: the id of the station * @chains: bitmask of chains reported in %chain_signal * @chain_signal: per chain signal strength * @last_rssi: last rssi reported * @drv: pointer back to the driver data */ - struct iwl_lq_sta_rs_fw { /* last tx rate_n_flags */ u32 last_rate_n_flags; @@ -223,7 +217,14 @@ struct iwl_lq_sta_rs_fw { struct lq_sta_pers_rs_fw { u32 sta_id; #ifdef CONFIG_MAC80211_DEBUGFS + /** + * @dbg_fixed_rate: for debug, use fixed rate if not 0 + */ u32 dbg_fixed_rate; + /** + * @dbg_agg_frame_count_lim: for debug, max number of + * frames in A-MPDU + */ u16 dbg_agg_frame_count_lim; #endif u8 chains; @@ -233,7 +234,7 @@ struct iwl_lq_sta_rs_fw { } pers; }; -/** +/* * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { @@ -275,7 +276,7 @@ struct rs_rate_stats { u64 total; }; -/** +/* * struct iwl_scale_tbl_info -- tx params and success history for all rates * * There are two of these in struct iwl_lq_sta, @@ -296,7 +297,7 @@ enum { RS_STATE_STAY_IN_COLUMN, }; -/** +/* * struct iwl_lq_sta -- driver's rate scaling private structure * * Pointer to this gets passed back and forth between driver and mac80211. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 542c192698..8caa971770 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -553,7 +553,7 @@ struct iwl_mvm_stat_data { struct iwl_mvm_stat_data_all_macs { struct iwl_mvm *mvm; __le32 flags; - struct iwl_statistics_ntfy_per_mac *per_mac_stats; + struct iwl_stats_ntfy_per_mac *per_mac; }; static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) @@ -658,7 +658,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data_all_macs *data = _data; - struct iwl_statistics_ntfy_per_mac *mac_stats; + struct iwl_stats_ntfy_per_mac *mac_stats; int sig; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 vif_id = mvmvif->id; @@ -669,7 +669,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, if (vif->type != NL80211_IFTYPE_STATION) return; - mac_stats = &data->per_mac_stats[vif_id]; + mac_stats = &data->per_mac[vif_id]; mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(mac_stats->beacon_counter); @@ -759,7 +759,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, struct iwl_mvm_stat_data_all_macs data = { .mvm = mvm, .flags = stats->flags, - .per_mac_stats = stats->per_mac_stats, + .per_mac = stats->per_mac, }; ieee80211_iterate_active_interfaces(mvm->hw, @@ -828,6 +828,142 @@ static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm, return true; } +static void +iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, + struct iwl_stats_ntfy_per_link *per_link) +{ + u32 air_time[MAC_INDEX_AUX] = {}; + u32 rx_bytes[MAC_INDEX_AUX] = {}; + int fw_link_id; + + for (fw_link_id = 0; fw_link_id < ARRAY_SIZE(mvm->link_id_to_link_conf); + fw_link_id++) { + struct iwl_stats_ntfy_per_link *link_stats; + struct ieee80211_bss_conf *bss_conf; + struct iwl_mvm_vif *mvmvif; + int link_id; + int sig; + + bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, + false); + if (!bss_conf) + continue; + + if (bss_conf->vif->type != NL80211_IFTYPE_STATION) + continue; + + link_id = bss_conf->link_id; + 
if (link_id >= ARRAY_SIZE(mvmvif->link)) + continue; + + mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif); + if (!mvmvif || !mvmvif->link[link_id]) + continue; + + link_stats = &per_link[fw_link_id]; + + mvmvif->link[link_id]->beacon_stats.num_beacons = + le32_to_cpu(link_stats->beacon_counter); + + /* we basically just use the u8 to store 8 bits and then treat + * it as a s8 whenever we take it out to a different type. + */ + mvmvif->link[link_id]->beacon_stats.avg_signal = + -le32_to_cpu(link_stats->beacon_average_energy); + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (mvm->statistics_clear) + mvmvif->link[link_id]->beacon_stats.accu_num_beacons += + mvmvif->link[link_id]->beacon_stats.num_beacons; + + sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); + iwl_mvm_update_vif_sig(bss_conf->vif, sig); + + if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX, + "invalid mvmvif id: %d", mvmvif->id)) + continue; + + air_time[mvmvif->id] += + le32_to_cpu(per_link[fw_link_id].air_time); + rx_bytes[mvmvif->id] += + le32_to_cpu(per_link[fw_link_id].rx_bytes); + } + + /* Don't update in case the statistics are not cleared, since + * we will end up counting twice the same airtime, once in TCM + * request and once in statistics notification. + */ + if (mvm->statistics_clear) { + __le32 air_time_le[MAC_INDEX_AUX]; + __le32 rx_bytes_le[MAC_INDEX_AUX]; + int vif_id; + + for (vif_id = 0; vif_id < ARRAY_SIZE(air_time_le); vif_id++) { + air_time_le[vif_id] = cpu_to_le32(air_time[vif_id]); + rx_bytes_le[vif_id] = cpu_to_le32(rx_bytes[vif_id]); + } + + iwl_mvm_update_tcm_from_stats(mvm, air_time_le, rx_bytes_le); + } +} + +void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_system_statistics_notif_oper *stats; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP, + STATISTICS_OPER_NOTIF, 0); + + if (notif_ver != 3) { + IWL_FW_CHECK_FAILED(mvm, + "Oper stats notif ver %d is not supported\n", + notif_ver); + return; + } + + stats = (void *)&pkt->data; + iwl_mvm_stat_iterator_all_links(mvm, stats->per_link); + + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = + le32_to_cpu(stats->per_sta[i].average_energy); + + ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, + average_energy); +} + +void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_system_statistics_part1_notif_oper *part1_stats; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP, + STATISTICS_OPER_PART1_NOTIF, 0); + + if (notif_ver != 4) { + IWL_FW_CHECK_FAILED(mvm, + "Part1 stats notif ver %d is not supported\n", + notif_ver); + return; + } + + part1_stats = (void *)&pkt->data; + mvm->radio_stats.rx_time = 0; + mvm->radio_stats.tx_time = 0; + for (i = 0; i < ARRAY_SIZE(part1_stats->per_link); i++) { + mvm->radio_stats.rx_time += + le64_to_cpu(part1_stats->per_link[i].rx_time); + mvm->radio_stats.tx_time += + le64_to_cpu(part1_stats->per_link[i].tx_time); + } +} + static void iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) @@ -887,11 +1023,11 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, for (i = 0; i < ARRAY_SIZE(average_energy); i++) average_energy[i] = - 
le32_to_cpu(stats->per_sta_stats[i].average_energy); + le32_to_cpu(stats->per_sta[i].average_energy); for (i = 0; i < ARRAY_SIZE(air_time); i++) { - air_time[i] = stats->per_mac_stats[i].air_time; - rx_bytes[i] = stats->per_mac_stats[i].rx_bytes; + air_time[i] = stats->per_mac[i].air_time; + rx_bytes[i] = stats->per_mac[i].rx_bytes; } } @@ -917,6 +1053,13 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, __le32 *bytes, *air_time, flags; int expected_size; u8 *energy; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return; /* From ver 14 and up we use TLV statistics format */ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 8d1e44fd9d..af15d470c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -376,8 +376,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ if (phy_info & IWL_RX_MPDU_PHY_AMPDU && (status & IWL_RX_MPDU_STATUS_SEC_MASK) == - IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) + IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) { + IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n"); return -1; + } if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_has_protected(hdr->frame_control))) @@ -503,6 +505,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + if (WARN_ON_ONCE(!mvm_sta->dup_data)) + return false; + dup_data = &mvm_sta->dup_data[queue]; /* @@ -548,44 +554,12 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; } -/* - * Returns true if sn2 - buffer_size < sn1 < sn2. - * To be used only in order to compare reorder buffer head with NSSN. - * We fully trust NSSN unless it is behind us due to reorder timeout. - * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. - */ -static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) -{ - return ieee80211_sn_less(sn1, sn2) && - !ieee80211_sn_less(sn1, sn2 - buffer_size); -} - -static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) -{ - if (IWL_MVM_USE_NSSN_SYNC) { - struct iwl_mvm_nssn_sync_data notif = { - .baid = baid, - .nssn = nssn, - }; - - iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, - ¬if, sizeof(notif)); - } -} - -#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) - -enum iwl_mvm_release_flags { - IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), - IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), -}; - static void iwl_mvm_release_frames(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_reorder_buffer *reorder_buf, - u16 nssn, u32 flags) + u16 nssn) { struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[reorder_buf->queue * @@ -594,31 +568,12 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, lockdep_assert_held(&reorder_buf->lock); - /* - * We keep the NSSN not too far behind, if we are sync'ing it and it - * is more than 2048 ahead of us, it must be behind us. Discard it. - * This can happen if the queue that hit the 0 / 2048 seqno was lagging - * behind and this queue already processed packets. 
The next if - * would have caught cases where this queue would have processed less - * than 64 packets, but it may have processed more than 64 packets. - */ - if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && - ieee80211_sn_less(nssn, ssn)) - goto set_timer; - - /* ignore nssn smaller than head sn - this can happen due to timeout */ - if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) - goto set_timer; - - while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { + while (ieee80211_sn_less(ssn, nssn)) { int index = ssn % reorder_buf->buf_size; - struct sk_buff_head *skb_list = &entries[index].e.frames; + struct sk_buff_head *skb_list = &entries[index].frames; struct sk_buff *skb; ssn = ieee80211_sn_inc(ssn); - if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && - (ssn == 2048 || ssn == 0)) - iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); /* * Empty the list. Will have more than one frame for A-MSDU. @@ -633,99 +588,6 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, } } reorder_buf->head_sn = nssn; - -set_timer: - if (reorder_buf->num_stored && !reorder_buf->removed) { - u16 index = reorder_buf->head_sn % reorder_buf->buf_size; - - while (skb_queue_empty(&entries[index].e.frames)) - index = (index + 1) % reorder_buf->buf_size; - /* modify timer to match next frame's expiration time */ - mod_timer(&reorder_buf->reorder_timer, - entries[index].e.reorder_time + 1 + - RX_REORDER_BUF_TIMEOUT_MQ); - } else { - del_timer(&reorder_buf->reorder_timer); - } -} - -void iwl_mvm_reorder_timer_expired(struct timer_list *t) -{ - struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer); - struct iwl_mvm_baid_data *baid_data = - iwl_mvm_baid_data_from_reorder_buf(buf); - struct iwl_mvm_reorder_buf_entry *entries = - &baid_data->entries[buf->queue * baid_data->entries_per_queue]; - int i; - u16 sn = 0, index = 0; - bool expired = false; - bool cont = false; - - spin_lock(&buf->lock); - - if (!buf->num_stored || buf->removed) { - spin_unlock(&buf->lock); - return; - } - - for (i = 0; i < buf->buf_size ; i++) { - index = (buf->head_sn + i) % buf->buf_size; - - if (skb_queue_empty(&entries[index].e.frames)) { - /* - * If there is a hole and the next frame didn't expire - * we want to break and not advance SN - */ - cont = false; - continue; - } - if (!cont && - !time_after(jiffies, entries[index].e.reorder_time + - RX_REORDER_BUF_TIMEOUT_MQ)) - break; - - expired = true; - /* continue until next hole after this expired frames */ - cont = true; - sn = ieee80211_sn_add(buf->head_sn, i + 1); - } - - if (expired) { - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id = ffs(baid_data->sta_mask) - 1; - - rcu_read_lock(); - sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - goto out; - } - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* SN is set to the last expired frame + 1 */ - IWL_DEBUG_HT(buf->mvm, - "Releasing expired frames for sta %u, sn %d\n", - sta_id, sn); - iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, - sta, baid_data->tid); - iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, - buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); - rcu_read_unlock(); - } else { - /* - * If no frame expired and there are stored frames, index is now - * pointing to the first unexpired frame - modify timer - * accordingly to this frame. 
- */ - mod_timer(&buf->reorder_timer, - entries[index].e.reorder_time + - 1 + RX_REORDER_BUF_TIMEOUT_MQ); - } - -out: - spin_unlock(&buf->lock); } static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, @@ -758,10 +620,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, - reorder_buf->buf_size), - 0); + reorder_buf->buf_size)); spin_unlock_bh(&reorder_buf->lock); - del_timer_sync(&reorder_buf->reorder_timer); out: rcu_read_unlock(); @@ -769,8 +629,7 @@ out: static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, struct napi_struct *napi, - u8 baid, u16 nssn, int queue, - u32 flags) + u8 baid, u16 nssn, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; @@ -788,8 +647,7 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, ba_data = rcu_dereference(mvm->baid_map[baid]); if (!ba_data) { - WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC), - "BAID %d not found in map\n", baid); + WARN(true, "BAID %d not found in map\n", baid); goto out; } @@ -803,22 +661,13 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, napi, ba_data, - reorder_buf, nssn, flags); + reorder_buf, nssn); spin_unlock_bh(&reorder_buf->lock); out: rcu_read_unlock(); } -static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm, - struct napi_struct *napi, int queue, - const struct iwl_mvm_nssn_sync_data *data) -{ - iwl_mvm_release_frames_from_notif(mvm, napi, data->baid, - data->nssn, queue, - IWL_MVM_RELEASE_FROM_RSS_SYNC); -} - void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -853,14 +702,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, break; iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; - case IWL_MVM_RXQ_NSSN_SYNC: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data), - "invalid nssn sync notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_nssn_sync_data))) - break; - iwl_mvm_nssn_sync(mvm, napi, queue, - (void *)internal_notif->data); - break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } @@ -874,55 +715,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, } } -static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, int tid, - struct iwl_mvm_reorder_buffer *buffer, - u32 reorder, u32 gp2, int queue) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (gp2 != buffer->consec_oldsn_ampdu_gp2) { - /* we have a new (A-)MPDU ... 
*/ - - /* - * reset counter to 0 if we didn't have any oldsn in - * the last A-MPDU (as detected by GP2 being identical) - */ - if (!buffer->consec_oldsn_prev_drop) - buffer->consec_oldsn_drops = 0; - - /* either way, update our tracking state */ - buffer->consec_oldsn_ampdu_gp2 = gp2; - } else if (buffer->consec_oldsn_prev_drop) { - /* - * tracking state didn't change, and we had an old SN - * indication before - do nothing in this case, we - * already noted this one down and are waiting for the - * next A-MPDU (by GP2) - */ - return; - } - - /* return unless this MPDU has old SN */ - if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)) - return; - - /* update state */ - buffer->consec_oldsn_prev_drop = 1; - buffer->consec_oldsn_drops++; - - /* if limit is reached, send del BA and reset state */ - if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) { - IWL_WARN(mvm, - "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n", - IWL_MVM_AMPDU_CONSEC_DROPS_DELBA, - sta->addr, queue, tid); - ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr); - buffer->consec_oldsn_prev_drop = 0; - buffer->consec_oldsn_drops = 0; - } -} - /* * Returns true if the MPDU was buffered\dropped, false if it should be passed * to upper layer. @@ -934,11 +726,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; - struct sk_buff *tail; u32 reorder = le32_to_cpu(desc->reorder_data); bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = @@ -955,6 +745,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) + return false; + /* * This also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that @@ -1016,59 +809,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, buffer->valid = true; } - if (ieee80211_is_back_req(hdr->frame_control)) { - iwl_mvm_release_frames(mvm, sta, napi, baid_data, - buffer, nssn, 0); + /* drop any duplicated packets */ + if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE)) goto drop; - } - - /* - * If there was a significant jump in the nssn - adjust. - * If the SN is smaller than the NSSN it might need to first go into - * the reorder buffer, in which case we just release up to it and the - * rest of the function will take care of storing it and releasing up to - * the nssn. - * This should not happen. This queue has been lagging and it should - * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice - * and update the other queues. - */ - if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, - buffer->buf_size) || - !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { - u16 min_sn = ieee80211_sn_less(sn, nssn) ? 
sn : nssn; - - iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, - min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); - } - - iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder, - rx_status->device_timestamp, queue); /* drop any oudated packets */ - if (ieee80211_sn_less(sn, buffer->head_sn)) + if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) goto drop; /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { - if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, - buffer->buf_size) && - (!amsdu || last_subframe)) { - /* - * If we crossed the 2048 or 0 SN, notify all the - * queues. This is done in order to avoid having a - * head_sn that lags behind for too long. When that - * happens, we can get to a situation where the head_sn - * is within the interval [nssn - buf_size : nssn] - * which will make us think that the nssn is a packet - * that we already freed because of the reordering - * buffer and we will ignore it. So maintain the - * head_sn somewhat updated across all the queues: - * when it crosses 0 and 2048. - */ - if (sn == 2048 || sn == 0) - iwl_mvm_sync_nssn(mvm, baid, sn); + if (!amsdu || last_subframe) buffer->head_sn = nssn; - } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; @@ -1083,37 +835,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, * while technically there is no hole and we can move forward. */ if (!buffer->num_stored && sn == buffer->head_sn) { - if (!amsdu || last_subframe) { - if (sn == 2048 || sn == 0) - iwl_mvm_sync_nssn(mvm, baid, sn); + if (!amsdu || last_subframe) buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); - } + /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } - index = sn % buffer->buf_size; - - /* - * Check if we already stored this frame - * As AMSDU is either received or not as whole, logic is simple: - * If we have frames in that position in the buffer and the last frame - * originated from AMSDU had a different SN then it is a retransmission. - * If it is the same SN then if the subframe index is incrementing it - * is the same AMSDU - otherwise it is a retransmission. 
- */ - tail = skb_peek_tail(&entries[index].e.frames); - if (tail && !amsdu) - goto drop; - else if (tail && (sn != buffer->last_amsdu || - buffer->last_sub_index >= sub_frame_idx)) - goto drop; - /* put in reorder buffer */ - __skb_queue_tail(&entries[index].e.frames, skb); + index = sn % buffer->buf_size; + __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; - entries[index].e.reorder_time = jiffies; if (amsdu) { buffer->last_amsdu = sn; @@ -1133,8 +866,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, */ if (!amsdu || last_subframe) iwl_mvm_release_frames(mvm, sta, napi, baid_data, - buffer, nssn, - IWL_MVM_RELEASE_SEND_RSS_SYNC); + buffer, nssn); spin_unlock_bh(&buffer->lock); return true; @@ -1507,7 +1239,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, #define IWL_RX_RU_DATA_A1 2 #define IWL_RX_RU_DATA_A2 2 #define IWL_RX_RU_DATA_B1 2 -#define IWL_RX_RU_DATA_B2 3 +#define IWL_RX_RU_DATA_B2 4 #define IWL_RX_RU_DATA_C1 3 #define IWL_RX_RU_DATA_C2 3 #define IWL_RX_RU_DATA_D1 4 @@ -2562,6 +2294,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_rx_csum(mvm, sta, skb, pkt); if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { + IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n", + le16_to_cpu(hdr->seq_ctrl)); kfree_skb(skb); goto out; } @@ -2613,9 +2347,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) && likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) && - likely(!iwl_mvm_mei_filter_scan(mvm, skb))) + likely(!iwl_mvm_mei_filter_scan(mvm, skb))) { + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && + (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && + !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) + rx_status->flag |= RX_FLAG_AMSDU_MORE; + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, link_sta); + } out: rcu_read_unlock(); } @@ -2758,7 +2498,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, le16_to_cpu(release->nssn), - queue, 0); + queue); } void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, @@ -2799,7 +2539,10 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, tid)) goto out; - iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0); + IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n", + nssn); + + iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue); out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 3cbe2c0b8d..75c5c58e14 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -3408,7 +3408,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) if (!(mvm->scan_status & type)) return 0; - if (iwl_mvm_is_radio_killed(mvm)) { + if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { ret = 0; goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 2c231f4623..bba96a9688 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -827,7 +827,7 @@ static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta) if (!link) continue; - /* support for 1k ba size */ + /* support for 512 ba size */ if (link->eht_cap.has_eht 
&& max_size < IWL_DEFAULT_QUEUE_SIZE_EHT) max_size = IWL_DEFAULT_QUEUE_SIZE_EHT; @@ -865,11 +865,11 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; unsigned int link_id; - for (link_id = 0; - link_id < ARRAY_SIZE(mvmsta->link); - link_id++) { + rcu_read_lock(); + for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) { struct iwl_mvm_link_sta *link = rcu_dereference_protected(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); @@ -879,6 +879,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, sta_mask |= BIT(link->sta_id); } + rcu_read_unlock(); } else { sta_mask |= BIT(sta_id); } @@ -2718,18 +2719,9 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) - __skb_queue_purge(&entries[j].e.frames); - /* - * Prevent timer re-arm. This prevents a very far fetched case - * where we timed out on the notification. There may be prior - * RX frames pending in the RX queue before the notification - * that might get processed between now and the actual deletion - * and we would re-arm the timer although we are deleting the - * reorder buffer. - */ - reorder_buf->removed = true; + __skb_queue_purge(&entries[j].frames); + spin_unlock_bh(&reorder_buf->lock); - del_timer_sync(&reorder_buf->reorder_timer); } } @@ -2749,15 +2741,12 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; - /* rx reorder timer */ - timer_setup(&reorder_buf->reorder_timer, - iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) - __skb_queue_head_init(&entries[j].e.frames); + __skb_queue_head_init(&entries[j].frames); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 7364346a12..b33a0ce096 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -286,12 +286,10 @@ struct iwl_mvm_key_pn { * * @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA - * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, - IWL_MVM_RXQ_NSSN_SYNC, }; /** @@ -315,11 +313,6 @@ struct iwl_mvm_delba_data { u32 baid; } __packed; -struct iwl_mvm_nssn_sync_data { - u32 baid; - u32 nssn; -} __packed; - /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection @@ -356,6 +349,7 @@ struct iwl_mvm_link_sta { /** * struct iwl_mvm_sta - representation of a station in the driver + * @vif: the interface the station belongs to * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for @@ -380,6 +374,7 @@ struct iwl_mvm_link_sta { * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. 
* In case TLC offload is not active it is either 0xFFFF or 0. * @max_amsdu_len: max AMSDU length + * @sleeping: indicates the station is sleeping (when not offloaded to FW) * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleeping: sta sleep transitions in power management * @sleep_tx_count: the number of frames that we told the firmware to let out @@ -389,7 +384,6 @@ struct iwl_mvm_link_sta { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data - * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index dae6f2a1aa..e7d5f4ebeb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2022 Intel Corporation + * Copyright (C) 2018-2020, 2022-2023 Intel Corporation */ #include #include "mvm.h" @@ -144,7 +144,8 @@ void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + unsigned int link_id) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; @@ -154,7 +155,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, duration, - duration, true); + duration, true, link_id); else iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 158266719f..2e653a417d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -42,6 +42,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, te_data->uid = 0; te_data->id = TE_MAX; te_data->vif = NULL; + te_data->link_id = -1; } void iwl_mvm_roc_done_wk(struct work_struct *wk) @@ -244,7 +245,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, } iwl_mvm_csa_client_absent(mvm, te_data->vif); cancel_delayed_work(&mvmvif->csa_work); - ieee80211_chswitch_done(te_data->vif, true); + ieee80211_chswitch_done(te_data->vif, true, 0); break; default: /* should never happen */ @@ -399,6 +400,22 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, } } +void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_roc_notif *notif = (void *)pkt->data; + + if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) && + le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) { + set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + 
ieee80211_ready_on_channel(mvm->hw); + } else { + iwl_mvm_roc_finished(mvm); + ieee80211_remain_on_channel_expired(mvm->hw); + } +} + /* * Handle A Aux ROC time event */ @@ -672,19 +689,46 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, } } +/* Determine whether mac or link id should be used, and validate the link id */ +static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 link_id) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, + SESSION_PROTECTION_CMD), 1); + + if (ver < 2) + return mvmvif->id; + + if (WARN(link_id < 0 || !mvmvif->link[link_id], + "Invalid link ID for session protection: %u\n", link_id)) + return -EINVAL; + + if (WARN(ieee80211_vif_is_mld(vif) && + !(vif->active_links & BIT(link_id)), + "Session Protection on an inactive link: %u\n", link_id)) + return -EINVAL; + + return mvmvif->link[link_id]->fw_link_id; +} + static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - u32 id) + struct ieee80211_vif *vif, + u32 id, u32 link_id) { + int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); struct iwl_mvm_session_prot_cmd cmd = { - .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .conf_id = cpu_to_le32(id), }; int ret; + if (mac_link_id < 0) + return; + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); @@ -698,10 +742,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, u32 *uid) { u32 id; + struct ieee80211_vif *vif = te_data->vif; struct iwl_mvm_vif *mvmvif; enum nl80211_iftype iftype; + unsigned int link_id; - if (!te_data->vif) + if (!vif) return false; mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); @@ -716,6 +762,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, /* Save time event uid before clearing its data */ *uid = te_data->uid; id = te_data->id; + link_id = te_data->link_id; /* * The clear_data function handles time events that were already removed @@ -733,7 +780,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, id != HOT_SPOT_CMD) { if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. 
Cancel it */ - iwl_mvm_cancel_session_protection(mvm, mvmvif, id); + iwl_mvm_cancel_session_protection(mvm, vif, id, + link_id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_p2p_roc_finished(mvm); } @@ -850,18 +898,41 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data; + unsigned int ver = + iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, + SESSION_PROTECTION_CMD), 2); + int id = le32_to_cpu(notif->mac_link_id); struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; + unsigned int notif_link_id; rcu_read_lock(); - vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id), - true); + + if (ver <= 2) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + } else { + struct ieee80211_bss_conf *link_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + + if (!link_conf) + goto out_unlock; + + notif_link_id = link_conf->link_id; + vif = link_conf->vif; + } if (!vif) goto out_unlock; mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 && + mvmvif->time_event_data.link_id != notif_link_id, + "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n", + notif_link_id, mvmvif->time_event_data.link_id)) + goto out_unlock; + /* The vif is not a P2P_DEVICE, maintain its time_event_data */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { struct iwl_mvm_time_event_data *te_data = @@ -901,6 +972,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; + mvmvif->time_event_data.link_id = -1; iwl_mvm_p2p_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { @@ -924,8 +996,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; @@ -935,6 +1006,9 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, /* The time_event_data.id field is reused to save session * protection's configuration. 
*/ + + mvmvif->time_event_data.link_id = 0; + switch (type) { case IEEE80211_ROC_TYPE_NORMAL: mvmvif->time_event_data.id = @@ -1051,6 +1125,37 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } +static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) +{ + int ret; + struct iwl_roc_req roc_cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .activity = cpu_to_le32(activity), + }; + + lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + 0, sizeof(roc_cmd), &roc_cmd); + WARN_ON(ret); +} + +static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif) +{ + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, + &mvmvif->hs_time_event_data); + else if (fw_ver == 3) + iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT); + else + IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); +} + void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif; @@ -1061,12 +1166,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - iwl_mvm_cancel_session_protection(mvm, mvmvif, - mvmvif->time_event_data.id); + iwl_mvm_cancel_session_protection(mvm, vif, + mvmvif->time_event_data.id, + mvmvif->time_event_data.link_id); iwl_mvm_p2p_roc_finished(mvm); } else { - iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->hs_time_event_data); + iwl_mvm_roc_station_remove(mvm, mvmvif); iwl_mvm_roc_finished(mvm); } @@ -1185,25 +1290,28 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - bool wait_for_notif) + bool wait_for_notif, + unsigned int link_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) }; struct iwl_notification_wait wait_notif; + int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); struct iwl_mvm_session_prot_cmd cmd = { - .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; + if (mac_link_id < 0) + return; + lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); - if (te_data->running && + if (te_data->running && te_data->link_id == link_id && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); @@ -1220,6 +1328,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); te_data->vif = vif; + te_data->link_id = link_id; spin_unlock_bh(&mvm->time_event_lock); IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", @@ -1229,11 +1338,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, 
sizeof(cmd), &cmd)) { - IWL_ERR(mvm, - "Couldn't send the SESSION_PROTECTION_CMD\n"); - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); + goto send_cmd_err; } return; @@ -1246,12 +1351,19 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { - IWL_ERR(mvm, - "Couldn't send the SESSION_PROTECTION_CMD\n"); iwl_remove_notification(&mvm->notif_wait, &wait_notif); + goto send_cmd_err; } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, TU_TO_JIFFIES(100))) { IWL_ERR(mvm, "Failed to protect session until session protection\n"); } + return; + +send_cmd_err: + IWL_ERR(mvm, + "Couldn't send the SESSION_PROTECTION_CMD\n"); + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index 989a5319fb..49256ba4cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2020, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __time_event_h__ @@ -100,6 +100,14 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +/** + * iwl_mvm_rx_roc_notif - handles %DISCOVERY_ROC_NTF. + * @mvm: the mvm component + * @rxb: RX buffer + */ +void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + /** * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality * @mvm: the mvm component @@ -134,7 +142,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /** * iwl_mvm_remove_time_event - general function to clean up of time event * @mvm: the mvm component - * @vif: the vif to which the time event belongs + * @mvmvif: the vif to which the time event belongs * @te_data: the time event data that corresponds to that time event * * This function can be used to cancel a time event regardless its type. 
@@ -195,16 +203,21 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) * iwl_mvm_schedule_session_protection - schedule a session protection * @mvm: the mvm component * @vif: the virtual interface for which the protection issued - * @duration: the duration of the protection + * @duration: the requested duration of the protection + * @min_duration: the minimum duration of the protection * @wait_for_notif: if true, will block until the start of the protection + * @link_id: The link to schedule a session protection for */ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - bool wait_for_notif); + bool wait_for_notif, + unsigned int link_id); /** * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF + * @mvm: the mvm component + * @rxb: the RX buffer containing the notification */ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 157e96fa23..dee9c367dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -642,7 +642,6 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, int trip, int temp) { struct iwl_mvm *mvm = thermal_zone_device_priv(device); - struct iwl_mvm_thermal_device *tzone; int ret; mutex_lock(&mvm->mutex); @@ -658,12 +657,6 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, goto out; } - tzone = &mvm->tz_device; - if (!tzone) { - ret = -EIO; - goto out; - } - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); out: mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6fdb2c3851..db986bfc4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -262,8 +262,42 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } +static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + int rate_idx) +{ + u32 rate_flags = 0; + u8 rate_plcp; + bool is_cck; + + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) + rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, + info, + info->control.vif); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); + is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && + (rate_idx <= IWL_LAST_CCK_RATE); + + /* Set CCK or OFDM flag */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { + if (!is_cck) + rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; + else + rate_flags |= RATE_MCS_CCK_MSK; + } else if (is_cck) { + rate_flags |= RATE_MCS_CCK_MSK_V1; + } + + return (u32)rate_plcp | rate_flags; +} + static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info) + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + __le16 fc) { struct ieee80211_tx_rate *rate = &info->control.rates[0]; u32 result; @@ -288,6 +322,9 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + result = 
iwl_new_rate_from_v1(result); } else if (rate->flags & IEEE80211_TX_RC_MCS) { result = RATE_MCS_HT_MSK_V1; result |= u32_encode_bits(rate->idx, @@ -301,12 +338,21 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, result |= RATE_MCS_LDPC_MSK_V1; if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) result |= RATE_MCS_STBC_MSK; + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + result = iwl_new_rate_from_v1(result); } else { - return 0; + int rate_idx = info->control.rates[0].idx; + + result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx); } - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) - return iwl_new_rate_from_v1(result); + if (info->control.antennas) + result |= u32_encode_bits(info->control.antennas, + RATE_MCS_ANT_AB_MSK); + else + result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc); + return result; } @@ -315,17 +361,8 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, __le16 fc) { int rate_idx = -1; - u8 rate_plcp; - u32 rate_flags = 0; - bool is_cck; - - if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { - u32 result = iwl_mvm_get_inject_tx_rate(mvm, info); - if (result) - return result; - rate_idx = info->control.rates[0].idx; - } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { /* info->control is only relevant for non HW rate control */ /* HT rate doesn't make sense for a non data frame */ @@ -350,33 +387,16 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); } - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, - info, - info->control.vif); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); - is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); - - /* Set CCK or OFDM flag */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { - if (!is_cck) - rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; - else - rate_flags |= RATE_MCS_CCK_MSK; - } else if (is_cck) { - rate_flags |= RATE_MCS_CCK_MSK_V1; - } - - return (u32)rate_plcp | rate_flags; + return iwl_mvm_convert_rate_idx(mvm, info, rate_idx); } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) + return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc); + return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } @@ -1704,7 +1724,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) - ieee80211_tx_status(mvm->hw, skb); + ieee80211_tx_status_skb(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use @@ -2060,7 +2080,7 @@ out: while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(mvm->hw, skb); + ieee80211_tx_status_skb(mvm->hw, skb); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 48016b4343..91286018a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -342,6 +342,60 @@ static bool iwl_wait_stats_complete(struct 
iwl_notif_wait_data *notif_wait, return true; } +static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear, + u8 cmd_ver) +{ + struct iwl_system_statistics_cmd system_cmd = { + .cfg_mask = clear ? + cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) : + cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK | + IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK), + .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1), + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD), + .len[0] = sizeof(system_cmd), + .data[0] = &system_cmd, + }; + struct iwl_notification_wait stats_wait; + static const u16 stats_complete[] = { + WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF), + }; + int ret; + + if (cmd_ver != 1) { + IWL_FW_CHECK_FAILED(mvm, + "Invalid system statistics command version:%d\n", + cmd_ver); + return -EOPNOTSUPP; + } + + iwl_init_notification_wait(&mvm->notif_wait, &stats_wait, + stats_complete, ARRAY_SIZE(stats_complete), + NULL, NULL); + + mvm->statistics_clear = clear; + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &stats_wait); + return ret; + } + + /* 500ms for OPERATIONAL, PART1 and END notification should be enough + * for FW to collect data from all LMACs and send + * STATISTICS_NOTIFICATION to host + */ + ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2); + if (ret) + return ret; + + if (clear) + iwl_mvm_accu_radio_stats(mvm); + + return ret; +} + int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { @@ -353,8 +407,15 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) .len[0] = sizeof(scmd), .data[0] = &scmd, }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); int ret; + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver); + /* From version 15 - STATISTICS_NOTIFICATION, the reply for * STATISTICS_CMD is empty, and the response is with * STATISTICS_NOTIFICATION notification diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index bc83d2ba55..26a0953603 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1134,7 +1134,7 @@ static int get_crf_id(struct iwl_trans *iwl_trans) /* Enable access to peripheral registers */ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); - val |= ENABLE_WFPM; + val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK; iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ @@ -1196,6 +1196,9 @@ static int map_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_FMR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; + case REG_CRF_ID_TYPE_WHP: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12); + break; default: ret = -EIO; IWL_ERR(iwl_trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 5602441df2..7805a42948 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h 
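In the utils.c hunk above, iwl_mvm_request_statistics() first looks up the version of the newer SYSTEM_STATISTICS_CMD and routes to iwl_mvm_request_system_statistics() whenever the firmware advertises it, keeping the legacy STATISTICS_CMD flow as the fallback. Below is a generic sketch of that "probe the capability version, dispatch to the new path, otherwise keep the old one" shape; all names (fw_cmd_version, VER_UNKNOWN, request_*) are purely illustrative.

#include <stdio.h>

#define VER_UNKNOWN 0	/* illustrative sentinel, in the spirit of IWL_FW_CMD_VER_UNKNOWN */

/* Pretend firmware capability lookup: returns the command version or the sentinel. */
static unsigned int fw_cmd_version(int have_new_fw)
{
	return have_new_fw ? 1 : VER_UNKNOWN;
}

static int request_system_statistics(unsigned int ver, int clear)
{
	if (ver != 1) {
		fprintf(stderr, "unsupported system statistics version %u\n", ver);
		return -1;	/* reject versions this code does not understand */
	}
	printf("new path: system statistics, clear=%d\n", clear);
	return 0;
}

static int request_legacy_statistics(int clear)
{
	printf("legacy path: statistics command, clear=%d\n", clear);
	return 0;
}

static int request_statistics(int have_new_fw, int clear)
{
	unsigned int ver = fw_cmd_version(have_new_fw);

	/* Prefer the new command whenever the firmware advertises a version of it. */
	if (ver != VER_UNKNOWN)
		return request_system_statistics(ver, clear);

	return request_legacy_statistics(clear);
}

int main(void)
{
	request_statistics(1, 1);
	request_statistics(0, 0);
	return 0;
}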
@@ -58,10 +58,7 @@ struct iwl_rx_mem_buffer { bool invalid; }; -/** - * struct isr_statistics - interrupt statistics - * - */ +/* interrupt statistics */ struct isr_statistics { u32 hw; u32 sw; @@ -127,6 +124,8 @@ struct iwl_rx_completion_desc_bz { * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet + * @write_actual: actual write pointer written to device, since we update in + * blocks of 8 only * @free_count: Number of pre-allocated buffers in rx_free * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: @@ -135,10 +134,12 @@ struct iwl_rx_completion_desc_bz { * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status - * @lock: + * @lock: per-queue lock * @queue: actual rx queue. Not used for multi-rx queue. * @next_rb_is_fragment: indicates that the previous RB that we handled set * the fragmented flag, so the next one is still another fragment + * @napi: NAPI struct for this queue + * @queue_size: size of this queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ @@ -188,19 +189,20 @@ struct iwl_rb_allocator { /** * iwl_get_closed_rb_stts - get closed rb stts from different structs - * @rxq - the rxq to get the rb stts from + * @trans: transport pointer (for configuration) + * @rxq: the rxq to get the rb stts from */ -static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, - struct iwl_rxq *rxq) +static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans, + struct iwl_rxq *rxq) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { __le16 *rb_stts = rxq->rb_stts; - return READ_ONCE(*rb_stts); + return le16_to_cpu(READ_ONCE(*rb_stts)); } else { struct iwl_rb_status *rb_stts = rxq->rb_stts; - return READ_ONCE(rb_stts->closed_rb_num); + return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF; } } @@ -243,6 +245,7 @@ enum iwl_image_response_code { IWL_IMAGE_RESP_FAIL = 2, }; +#ifdef CONFIG_IWLWIFI_DEBUGFS /** * struct cont_rec: continuous recording data structure * @prev_wr_ptr: the last address that was read in monitor_data @@ -253,7 +256,6 @@ enum iwl_image_response_code { * in &iwl_fw_mon_dbgfs_state enum * @mutex: locked while reading from monitor_data debugfs file */ -#ifdef CONFIG_IWLWIFI_DEBUGFS struct cont_rec { u32 prev_wr_ptr; u32 prev_wrap_cnt; @@ -298,10 +300,6 @@ enum iwl_pcie_imr_status { * @prph_info_dma_addr: dma addr of prph info * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information - * @init_dram: DRAM data of firmware image (including paging). - * Context information addresses will be taken from here. - * This is driver's local copy for keeping track of size and - * count for allocating and freeing the memory. 
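iwl_get_closed_rb_stts() above now returns a host-order u16 with the le16_to_cpu() conversion (and, where needed, the 12-bit mask) applied inside the helper, so the callers stop repeating "le16_to_cpu(...) & 0x0FFF". A small stand-alone illustration of pushing that conversion into one accessor; le16_to_host() is a simplified stand-in for the kernel's le16_to_cpu() and assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for le16_to_cpu(); assumes a little-endian host for brevity. */
static uint16_t le16_to_host(uint16_t le)
{
	return le;
}

struct rx_queue {
	uint16_t closed_rb_le;	/* device-written, little-endian, 12 valid bits */
};

/* One accessor hides the byte order and the field width from every caller. */
static uint16_t get_closed_rb(const struct rx_queue *rxq)
{
	return le16_to_host(rxq->closed_rb_le) & 0xFFF;
}

int main(void)
{
	struct rx_queue rxq = { .closed_rb_le = 0xF123 };

	/* Callers no longer open-code "le16_to_cpu(...) & 0x0FFF". */
	printf("closed rb index: %u\n", get_closed_rb(&rxq));
	return 0;
}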
* @iml: image loader image virtual address * @iml_dma_addr: image loader image DMA address * @trans: pointer to the generic transport area @@ -321,10 +319,9 @@ enum iwl_pcie_imr_status { * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw - * @cmd_in_flight: true when we have a host command in flight -#ifdef CONFIG_IWLWIFI_DEBUGFS * @fw_mon_data: fw continuous recording data -#endif + * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround + * during commands in flight * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles @@ -344,8 +341,32 @@ enum iwl_pcie_imr_status { * @alloc_page: allocated page to still use parts of * @alloc_page_used: how much of the allocated page was already used (bytes) * @imr_status: imr dma state machine - * @wait_queue_head_t: imr wait queue for dma completion + * @imr_waitq: imr wait queue for dma completion * @rf_name: name/version of the CRF, if any + * @use_ict: whether or not ICT (interrupt table) is used + * @ict_index: current ICT read index + * @ict_tbl: ICT table pointer + * @ict_tbl_dma: ICT table DMA address + * @inta_mask: interrupt (INT-A) mask + * @irq_lock: lock to synchronize IRQ handling + * @txq_memory: TXQ allocation array + * @sx_waitq: waitqueue for Sx transitions + * @sx_complete: completion for Sx transitions + * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already + * @opmode_down: indicates opmode went away + * @num_rx_bufs: number of RX buffers to allocate/use + * @no_reclaim_cmds: special commands not using reclaim flow + * (firmware workaround) + * @n_no_reclaim_cmds: number of special commands not using reclaim flow + * @affinity_mask: IRQ affinity mask for each RX queue + * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio + * enable/disable + * @fw_reset_handshake: indicates FW reset handshake is needed + * @fw_reset_state: state of FW reset handshake + * @fw_reset_waitq: waitqueue for FW reset handshake + * @is_down: indicates the NIC is down + * @isr_stats: interrupt statistics + * @napi_dev: (fake) netdev for NAPI registration */ struct iwl_trans_pcie { struct iwl_rxq *rxq; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 63091c45a5..07931c2db4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1510,7 +1510,7 @@ restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). 
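Much of the internal.h churn above is documentation: struct members gain "@name: description" kernel-doc lines, and the debugfs-only cont_rec definition is kept together with its comment inside the CONFIG_IWLWIFI_DEBUGFS block. A tiny, invented example of that convention (the struct, its fields, and CONFIG_DEMO_DEBUGFS are made up for illustration):

#include <stdio.h>

#define CONFIG_DEMO_DEBUGFS 1	/* stand-in for a Kconfig option */

#ifdef CONFIG_DEMO_DEBUGFS
/**
 * struct demo_mon_state - continuous-recording state (debugfs only)
 * @prev_wr_ptr: last monitor write pointer that was read
 * @prev_wrap_cnt: wrap counter at the time of the last read
 */
struct demo_mon_state {
	unsigned int prev_wr_ptr;
	unsigned int prev_wrap_cnt;
};
#endif

int main(void)
{
#ifdef CONFIG_DEMO_DEBUGFS
	struct demo_mon_state mon = { .prev_wr_ptr = 0, .prev_wrap_cnt = 0 };

	printf("monitor state: wr=%u wrap=%u\n", mon.prev_wr_ptr, mon.prev_wrap_cnt);
#endif
	return 0;
}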
*/ - r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + r = iwl_get_closed_rb_stts(trans, rxq); i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ @@ -1660,9 +1660,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); local_bh_disable(); - if (napi_schedule_prep(&rxq->napi)) - __napi_schedule(&rxq->napi); - else + if (!napi_schedule(&rxq->napi)) iwl_pcie_clear_irq(trans, entry->entry); local_bh_enable(); @@ -2291,6 +2289,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) else sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; + if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) { + IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n", + inta_hw); + /* TODO: PLDR flow required here for >= Bz */ + } + /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { IWL_ERR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 2ecf6db95f..c9e5bda8f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -231,11 +231,14 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); + int ret; /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock_bh(&trans_pcie->irq_lock); - iwl_pcie_gen2_apm_init(trans); + ret = iwl_pcie_gen2_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); + if (ret) + return ret; iwl_op_mode_nic_config(trans->op_mode); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1bc4a0089c..d10208075a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1111,6 +1111,7 @@ static const struct iwl_causes_list causes_list_common[] = { IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), @@ -2114,8 +2115,11 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); - if (removal->rescan) - pci_rescan_bus(bus->parent); + if (removal->rescan && bus) { + if (bus->parent) + bus = bus->parent; + pci_rescan_bus(bus); + } pci_unlock_rescan_remove(); kfree(removal); @@ -2175,6 +2179,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return false; + spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) @@ -2287,6 +2294,8 @@ out: static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { +#define IWL_MAX_HW_ERRS 5 + unsigned int num_consec_hw_errors = 0; int offs = 0; u32 *vals = buf; @@ -2302,6 +2311,17 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, while (offs < dwords) { 
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + + if (iwl_trans_is_hw_error_value(vals[offs])) + num_consec_hw_errors++; + else + num_consec_hw_errors = 0; + + if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) { + iwl_trans_release_nic_access(trans); + return -EIO; + } + offs++; if (time_after(jiffies, end)) { @@ -2714,11 +2734,9 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { - u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, - rxq)); + u32 r = iwl_get_closed_rb_stts(trans, rxq); pos += scnprintf(buf + pos, bufsz - pos, - "\tclosed_rb_num: %u\n", - r & 0x0FFF); + "\tclosed_rb_num: %u\n", r); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); @@ -3091,7 +3109,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, spin_lock_bh(&rxq->lock); - r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + r = iwl_get_closed_rb_stts(trans, rxq); for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; @@ -3387,9 +3405,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = - le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) - & 0x0FFF; + num_rbs = iwl_get_closed_rb_stts(trans, rxq); num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + @@ -3599,10 +3615,19 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; void __iomem * const *table; + u32 bar0; if (!cfg_trans->gen2) ops = &trans_ops_pcie; + /* reassign our BAR 0 if invalid due to possible runtime PM races */ + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); + if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { + ret = pci_assign_resource(pdev, 0); + if (ret) + return ERR_PTR(ret); + } + ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index 4c09bc1930..124b29aac4 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -71,7 +71,8 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) /** * iwl_txq_inc_wrap - increment queue index, wrap back to beginning - * @index -- current index + * @trans: the transport (for configuration data) + * @index: current index */ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) { @@ -81,7 +82,8 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) /** * iwl_txq_dec_wrap - decrement queue index, wrap back to end - * @index -- current index + * @trans: the transport (for configuration data) + * @index: current index */ static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) { diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h index c17ab6dbbb..552ae33d78 100644 --- a/drivers/net/wireless/intersil/hostap/hostap.h +++ b/drivers/net/wireless/intersil/hostap/hostap.h @@ -92,7 +92,6 @@ void hostap_info_process(local_info_t *local, struct sk_buff *skb); extern const struct iw_handler_def hostap_iw_handler_def; extern const struct ethtool_ops prism2_ethtool_ops; -int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int hostap_siocdevprivate(struct 
net_device *dev, struct ifreq *ifr, void __user *data, int cmd); diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c index 3672291ced..5e5bada28b 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_download.c +++ b/drivers/net/wireless/intersil/hostap/hostap_download.c @@ -732,8 +732,7 @@ static int prism2_download(local_info_t *local, goto out; } - dl = kzalloc(sizeof(*dl) + param->num_areas * - sizeof(struct prism2_download_data_area), GFP_KERNEL); + dl = kzalloc(struct_size(dl, data, param->num_areas), GFP_KERNEL); if (dl == NULL) { ret = -ENOMEM; goto out; diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index b4adfc190a..26162f92e3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2316,21 +2316,6 @@ static const struct iw_priv_args prism2_priv[] = { }; -static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - - iface = netdev_priv(dev); - local = iface->local; - - if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL)) - return -EOPNOTSUPP; - - return 0; -} - - static int prism2_ioctl_priv_prism2_param(struct net_device *dev, struct iw_request_info *info, union iwreq_data *uwrq, char *extra) @@ -2910,146 +2895,6 @@ static int prism2_ioctl_priv_writemif(struct net_device *dev, } -static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - int ret = 0; - union iwreq_data wrqu; - - iface = netdev_priv(dev); - local = iface->local; - - printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor " - "- update software to use iwconfig mode monitor\n", - dev->name, task_pid_nr(current), current->comm); - - /* Backward compatibility code - this can be removed at some point */ - - if (*i == 0) { - /* Disable monitor mode - old mode was not saved, so go to - * Master mode */ - wrqu.mode = IW_MODE_MASTER; - ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); - } else if (*i == 1) { - /* netlink socket mode is not supported anymore since it did - * not separate different devices from each other and was not - * best method for delivering large amount of packets to - * user space */ - ret = -EOPNOTSUPP; - } else if (*i == 2 || *i == 3) { - switch (*i) { - case 2: - local->monitor_type = PRISM2_MONITOR_80211; - break; - case 3: - local->monitor_type = PRISM2_MONITOR_PRISM; - break; - } - wrqu.mode = IW_MODE_MONITOR; - ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); - hostap_monitor_mode_enable(local); - } else - ret = -EINVAL; - - return ret; -} - - -static int prism2_ioctl_priv_reset(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - - iface = netdev_priv(dev); - local = iface->local; - - printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i); - switch (*i) { - case 0: - /* Disable and enable card */ - local->func->hw_shutdown(dev, 1); - local->func->hw_config(dev, 0); - break; - - case 1: - /* COR sreset */ - local->func->hw_reset(dev); - break; - - case 2: - /* Disable and enable port 0 */ - local->func->reset_port(dev); - break; - - case 3: - prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING); - if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, - NULL)) - return -EINVAL; - break; - - case 4: - if (local->func->cmd(dev, 
HFA384X_CMDCODE_ENABLE, 0, NULL, - NULL)) - return -EINVAL; - break; - - default: - printk(KERN_DEBUG "Unknown reset request %d\n", *i); - return -EOPNOTSUPP; - } - - return 0; -} - - -static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i) -{ - int rid = *i; - int value = *(i + 1); - - printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value); - - if (hostap_set_word(dev, rid, value)) - return -EINVAL; - - return 0; -} - - -#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT -static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd) -{ - int ret = 0; - - switch (*cmd) { - case AP_MAC_CMD_POLICY_OPEN: - local->ap->mac_restrictions.policy = MAC_POLICY_OPEN; - break; - case AP_MAC_CMD_POLICY_ALLOW: - local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW; - break; - case AP_MAC_CMD_POLICY_DENY: - local->ap->mac_restrictions.policy = MAC_POLICY_DENY; - break; - case AP_MAC_CMD_FLUSH: - ap_control_flush_macs(&local->ap->mac_restrictions); - break; - case AP_MAC_CMD_KICKALL: - ap_control_kickall(local->ap); - hostap_deauth_all_stas(local->dev, local->ap, 0); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} -#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - - #ifdef PRISM2_DOWNLOAD_SUPPORT static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p) { @@ -3963,79 +3808,6 @@ const struct iw_handler_def hostap_iw_handler_def = .get_wireless_stats = hostap_get_wireless_stats, }; -/* Private ioctls (iwpriv) that have not yet been converted - * into new wireless extensions API */ -int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct iwreq *wrq = (struct iwreq *) ifr; - struct hostap_interface *iface; - local_info_t *local; - int ret = 0; - - iface = netdev_priv(dev); - local = iface->local; - - switch (cmd) { - case PRISM2_IOCTL_INQUIRE: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_MONITOR: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_RESET: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_WDS_ADD: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1); - break; - - case PRISM2_IOCTL_WDS_DEL: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0); - break; - - case PRISM2_IOCTL_SET_RID_WORD: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_set_rid_word(dev, - (int *) wrq->u.name); - break; - -#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT - case PRISM2_IOCTL_MACCMD: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_ADDMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_add_mac(&local->ap->mac_restrictions, - wrq->u.ap_addr.sa_data); - break; - case PRISM2_IOCTL_DELMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_del_mac(&local->ap->mac_restrictions, - wrq->u.ap_addr.sa_data); - break; - case PRISM2_IOCTL_KICKMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_kick_mac(local->ap, local->dev, - wrq->u.ap_addr.sa_data); - break; -#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} /* Private ioctls that are 
not used with iwpriv; * in SIOCDEVPRIVATE range */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 787f685e70..bf86ac26c2 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -796,7 +796,6 @@ static const struct net_device_ops hostap_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, @@ -809,7 +808,6 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, @@ -822,7 +820,6 @@ static const struct net_device_ops hostap_master_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h index c25cd21d18..f71c0545c0 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h +++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h @@ -617,7 +617,7 @@ struct prism2_download_data { u32 addr; /* wlan card address */ u32 len; u8 *data; /* allocated data */ - } data[]; + } data[] __counted_by(num_areas); }; diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 3356ea708d..522656de41 100644 --- a/drivers/net/wireless/intersil/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h @@ -126,7 +126,7 @@ struct p54_cal_database { size_t entry_size; size_t offset; size_t len; - u8 data[]; + u8 data[] __counted_by(len); }; #define EEPROM_READBACK_LEN 0x3fc diff --git a/drivers/net/wireless/legacy/ray_cs.c b/drivers/net/wireless/legacy/ray_cs.c index 8ace797ce9..c95a79e01c 100644 --- a/drivers/net/wireless/legacy/ray_cs.c +++ b/drivers/net/wireless/legacy/ray_cs.c @@ -348,7 +348,7 @@ static int ray_config(struct pcmcia_device *link) { int ret = 0; int i; - struct net_device *dev = (struct net_device *)link->priv; + struct net_device *dev = link->priv; ray_dev_t *local = netdev_priv(dev); dev_dbg(&link->dev, "ray_config\n"); @@ -1830,7 +1830,7 @@ static void set_multicast_list(struct net_device *dev) =============================================================================*/ static irqreturn_t ray_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *)dev_id; + struct net_device *dev = dev_id; struct pcmcia_device *link; ray_dev_t *local; struct ccs __iomem *pccs; @@ -2567,7 +2567,7 @@ static int ray_cs_proc_show(struct seq_file *m, void *v) link = this_device; if (!link) return 0; - dev = (struct net_device *)link->priv; + dev = link->priv; if (!dev) return 0; local = netdev_priv(dev); diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index 2ea03725f1..da211372a4 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -287,7 +287,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) mwifiex_dbg(priv->adapter, MSG, "indicating channel switch completion to kernel\n"); - 
mutex_lock(&priv->wdev.mtx); + wiphy_lock(priv->wdev.wiphy); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0); - mutex_unlock(&priv->wdev.mtx); + wiphy_unlock(priv->wdev.wiphy); } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4389cf3f88..3604abcbcf 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1835,10 +1835,11 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy, */ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *data) + struct cfg80211_ap_update *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_adapter *adapter = priv->adapter; + struct cfg80211_beacon_data *data = ¶ms->beacon; mwifiex_cancel_scan(adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 7bdec6c622..d263eae607 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -834,12 +834,12 @@ struct mwifiex_if_ops { void (*cleanup_mpa_buf) (struct mwifiex_adapter *); int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); - int (*init_fw_port) (struct mwifiex_adapter *); + void (*init_fw_port)(struct mwifiex_adapter *adapter); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); int (*reg_dump)(struct mwifiex_adapter *, char *); void (*device_dump)(struct mwifiex_adapter *); - int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); + void (*clean_pcie_ring)(struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 6697132ecc..5f997becdb 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -222,12 +222,19 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, /* * This function writes data into PCIE card register. */ -static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) +static inline void +mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) { struct pcie_service_card *card = adapter->card; iowrite32(data, card->pci_mmap1 + reg); +} +/* Non-void wrapper needed for read_poll_timeout(). */ +static inline int +mwifiex_write_reg_rpt(struct mwifiex_adapter *adapter, int reg, u32 data) +{ + mwifiex_write_reg(adapter, reg, data); return 0; } @@ -658,12 +665,12 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) * appears to ignore or miss our wakeup request, so we continue trying * until we receive an interrupt from the card. 
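Once mwifiex_write_reg() becomes a void inline, the read_poll_timeout() call sites that follow need mwifiex_write_reg_rpt(), a thin wrapper that returns 0, because the polling macro expects the issued operation to produce a return value. Below is a generic, self-contained sketch of the same shim; poll_op_until() is a toy stand-in for read_poll_timeout(), not the kernel macro, and all other names are invented.

#include <stdio.h>

/* Toy stand-in for read_poll_timeout(): retry op() until cond() holds or attempts run out. */
static int poll_op_until(int (*op)(int), int arg, int (*cond)(void), int attempts)
{
	while (attempts--) {
		if (op(arg))
			return -1;	/* bail out if the operation itself reports failure */
		if (cond())
			return 0;
	}
	return -1;			/* timed out */
}

static int wake_events;

/* The underlying write cannot fail, so it is naturally void... */
static void write_wakeup_reg(int value)
{
	wake_events += value;
}

/* ...and a non-void wrapper adapts it to the polling helper's expectations. */
static int write_wakeup_reg_rpt(int value)
{
	write_wakeup_reg(value);
	return 0;
}

static int card_is_awake(void)
{
	return wake_events >= 3;
}

int main(void)
{
	int ret = poll_op_until(write_wakeup_reg_rpt, 1, card_is_awake, 5);

	printf("wakeup %s after %d writes\n", ret ? "failed" : "succeeded", wake_events);
	return 0;
}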
*/ - if (read_poll_timeout(mwifiex_write_reg, retval, + if (read_poll_timeout(mwifiex_write_reg_rpt, retval, READ_ONCE(adapter->int_status) != 0, 500, 500 * N_WAKEUP_TRIES_SHORT_INTERVAL, false, adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { - if (read_poll_timeout(mwifiex_write_reg, retval, + if (read_poll_timeout(mwifiex_write_reg_rpt, retval, READ_ONCE(adapter->int_status) != 0, 10000, 10000 * N_WAKEUP_TRIES_LONG_INTERVAL, false, @@ -703,24 +710,12 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) * The host interrupt mask is read, the disable bit is reset and * written back to the card host interrupt mask register. */ -static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) { - if (mwifiex_pcie_ok_to_access_hw(adapter)) { - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, - 0x00000000)) { - mwifiex_dbg(adapter, ERROR, - "Disable host interrupt failed\n"); - return -1; - } - } + if (mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, 0x00000000); atomic_set(&adapter->tx_hw_pending, 0); - return 0; -} - -static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) -{ - WARN_ON(mwifiex_pcie_disable_host_int(adapter)); } /* @@ -731,15 +726,9 @@ static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) { - if (mwifiex_pcie_ok_to_access_hw(adapter)) { + if (mwifiex_pcie_ok_to_access_hw(adapter)) /* Simply write the mask to the register */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, - HOST_INTR_MASK)) { - mwifiex_dbg(adapter, ERROR, - "Enable host interrupt failed\n"); - return -1; - } - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK); return 0; } @@ -1303,7 +1292,7 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter) * This function defined as handler is also called while cleaning TXRX * during disconnect/ bss stop. */ -static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) +static void mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; @@ -1312,14 +1301,9 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) /* write pointer already set at last send * send dnld-rdy intr again, wait for completion. 
*/ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) { - mwifiex_dbg(adapter, ERROR, - "failed to assert dnld-rdy interrupt.\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); } - return 0; } /* @@ -1429,7 +1413,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 wrindx, num_tx_buffs, rx_val; - int ret; dma_addr_t buf_pa; struct mwifiex_pcie_buf_desc *desc = NULL; struct mwifiex_pfu_buf_desc *desc2 = NULL; @@ -1498,13 +1481,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, rx_val = card->rxbd_rdptr & reg->rx_wrap_mask; /* Write the TX ring write pointer in to reg->tx_wrptr */ - if (mwifiex_write_reg(adapter, reg->tx_wrptr, - card->txbd_wrptr | rx_val)) { - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to write reg->tx_wrptr\n"); - ret = -1; - goto done_unmap; - } + mwifiex_write_reg(adapter, reg->tx_wrptr, + card->txbd_wrptr | rx_val); /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card * seems to crash randomly after setting the TX ring write pointer when @@ -1521,13 +1499,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, adapter->data_sent = false; } else { /* Send the TX ready interrupt */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) { - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to assert dnld-rdy interrupt.\n"); - ret = -1; - goto done_unmap; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); } mwifiex_dbg(adapter, DATA, "info: SEND DATA: Updated data_sent = true; /* Send the TX ready interrupt */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to assert door-bell intr\n"); + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); return -EBUSY; } return -EINPROGRESS; -done_unmap: - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - card->tx_buf_list[wrindx] = NULL; - atomic_dec(&adapter->tx_hw_pending); - if (reg->pfu_enabled) - memset(desc2, 0, sizeof(*desc2)); - else - memset(desc, 0, sizeof(*desc)); - - return ret; } /* @@ -1675,13 +1636,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) tx_val = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ - if (mwifiex_write_reg(adapter, reg->rx_rdptr, - card->rxbd_rdptr | tx_val)) { - mwifiex_dbg(adapter, DATA, - "RECV DATA: failed to write reg->rx_rdptr\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->rx_rdptr, + card->rxbd_rdptr | tx_val); /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { @@ -1724,43 +1680,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the lower 32bits of the physical address to low command * address scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write download command to boot code.\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa); /* Write the upper 32bits of the physical address to high command * address scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, - 
(u32)((u64)buf_pa >> 32))) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write download command to boot code.\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)buf_pa >> 32)); /* Write the command length to cmd_size scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write command len to cmd_size scratch reg\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_size, skb->len); /* Ring the door bell */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DOOR_BELL)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to assert door-bell intr\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL); return 0; } @@ -1768,20 +1699,14 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* This function init rx port in firmware which in turn enables to receive data * from device before transmitting any packet. */ -static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ - if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | - tx_wrap)) { - mwifiex_dbg(adapter, ERROR, - "RECV DATA: failed to write reg->rx_rdptr\n"); - return -1; - } - return 0; + mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_wrap); } /* This function downloads commands to the device @@ -1791,7 +1716,6 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - int ret = 0; dma_addr_t cmd_buf_pa, cmdrsp_buf_pa; u8 *payload = (u8 *)skb->data; @@ -1841,63 +1765,29 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf); /* Write the lower 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, - (u32)cmdrsp_buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, + (u32)cmdrsp_buf_pa); + /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, - (u32)((u64)cmdrsp_buf_pa >> 32))) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, + (u32)((u64)cmdrsp_buf_pa >> 32)); } cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf); + /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, - (u32)cmd_buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)cmd_buf_pa); + /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, - (u32)((u64)cmd_buf_pa >> 32))) { - 
mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_addr_hi, + (u32)((u64)cmd_buf_pa >> 32)); /* Write the command length to reg->cmd_size */ - if (mwifiex_write_reg(adapter, reg->cmd_size, - card->cmd_buf->len)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write cmd len to reg->cmd_size\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_size, card->cmd_buf->len); /* Ring the door bell */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DOOR_BELL)) { - mwifiex_dbg(adapter, ERROR, - "Failed to assert door-bell intr\n"); - ret = -1; - goto done; - } - -done: - if (ret) - adapter->cmd_sent = false; + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL); return 0; } @@ -1941,13 +1831,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) MWIFIEX_SKB_DMA_ADDR(skb), MWIFIEX_SLEEP_COOKIE_SIZE, DMA_FROM_DEVICE); - if (mwifiex_write_reg(adapter, - PCIE_CPU_INT_EVENT, - CPU_INTR_SLEEP_CFM_DONE)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, + PCIE_CPU_INT_EVENT, + CPU_INTR_SLEEP_CFM_DONE); mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); mwifiex_unmap_pci_memory(adapter, skb, @@ -1980,18 +1866,11 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) /* Clear the cmd-rsp buffer address in scratch registers. This will prevent firmware from writing to the same response buffer again. */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { - mwifiex_dbg(adapter, ERROR, - "cmd_done: failed to clear cmd_rsp_addr_lo\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0); + /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { - mwifiex_dbg(adapter, ERROR, - "cmd_done: failed to clear cmd_rsp_addr_hi\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0); } return 0; @@ -2098,12 +1977,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) we need to find a better method of managing these buffers. 
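Several of the mwifiex hunks above retype helpers that can no longer fail from int to void (clean_pcie_ring, init_fw_port, the host interrupt enable/disable paths), update the function pointers in mwifiex_if_ops to match, and drop the now-dead error branches at the call sites. A compact illustration of retyping an ops callback and its caller; the ops struct and all names are made up for this sketch.

#include <stdio.h>

/* Hypothetical ops table after the conversion: callbacks that cannot fail are void. */
struct demo_if_ops {
	void (*disable_int)(void *ctx);		/* was: int (*disable_int)(void *ctx); */
	void (*init_fw_port)(void *ctx);	/* was: int (*init_fw_port)(void *ctx); */
};

static void demo_disable_int(void *ctx)
{
	(void)ctx;
	printf("interrupts masked\n");
}

static void demo_init_fw_port(void *ctx)
{
	(void)ctx;
	printf("rx ring read pointer programmed\n");
}

static const struct demo_if_ops demo_ops = {
	.disable_int = demo_disable_int,
	.init_fw_port = demo_init_fw_port,
};

int main(void)
{
	/* Callers simply invoke the hooks; the old "if (ops->...()) return -1;" pattern is gone. */
	demo_ops.disable_int(NULL);
	demo_ops.init_fw_port(NULL);
	return 0;
}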
*/ } else { - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_EVENT_DONE)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_EVENT_DONE); } return 0; @@ -2117,7 +1992,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - int ret = 0; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr; struct mwifiex_evt_buf_desc *desc; @@ -2169,18 +2043,11 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, card->evtbd_rdptr, wrptr); /* Write the event ring read pointer in to reg->evt_rdptr */ - if (mwifiex_write_reg(adapter, reg->evt_rdptr, - card->evtbd_rdptr)) { - mwifiex_dbg(adapter, ERROR, - "event_complete: failed to read reg->evt_rdptr\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->evt_rdptr, card->evtbd_rdptr); mwifiex_dbg(adapter, EVENT, "info: Check Events Again\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - - return ret; + return mwifiex_pcie_process_event_ready(adapter); } /* Combo firmware image is a combination of @@ -2313,11 +2180,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, "info: Downloading FW image (%d bytes)\n", firmware_len); - if (mwifiex_pcie_disable_host_int(adapter)) { - mwifiex_dbg(adapter, ERROR, - "%s: Disabling interrupts failed.\n", __func__); - return -1; - } + mwifiex_pcie_disable_host_int(adapter); skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { @@ -2471,21 +2334,12 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) u32 tries; /* Mask spurios interrupts */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, - HOST_INTR_MASK)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, HOST_INTR_MASK); mwifiex_dbg(adapter, INFO, "Setting driver ready signature\n"); - if (mwifiex_write_reg(adapter, reg->drv_rdy, - FIRMWARE_READY_PCIE)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write driver ready signature\n"); - return -1; - } + + mwifiex_write_reg(adapter, reg->drv_rdy, FIRMWARE_READY_PCIE); /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { @@ -2571,12 +2425,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, mwifiex_pcie_disable_host_int(adapter); /* Clear the pending interrupts */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return; - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg); } if (!adapter->pps_uapsd_mode && @@ -2671,13 +2520,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) } if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { - if (mwifiex_write_reg(adapter, - PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, + PCIE_HOST_INT_STATUS, + ~pcie_ireg); if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) { adapter->ps_state = PS_STATE_AWAKE; @@ -2801,7 +2646,7 @@ mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) static enum rdwr_status mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) { - int ret, tries; + int tries; u8 ctrl_data; u32 fw_status; struct pcie_service_card *card = adapter->card; @@ -2810,13 
+2655,7 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) return RDWR_STATUS_FAILURE; - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - reg->fw_dump_host_ready); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "PCIE write err\n"); - return RDWR_STATUS_FAILURE; - } + mwifiex_write_reg(adapter, reg->fw_dump_ctrl, reg->fw_dump_host_ready); for (tries = 0; tries < MAX_POLL_TRIES; tries++) { mwifiex_read_reg_byte(adapter, reg->fw_dump_ctrl, &ctrl_data); @@ -2827,13 +2666,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (ctrl_data != reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, "The ctrl reg was changed, re-try again!\n"); - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - reg->fw_dump_host_ready); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "PCIE write err\n"); - return RDWR_STATUS_FAILURE; - } + mwifiex_write_reg(adapter, reg->fw_dump_ctrl, + reg->fw_dump_host_ready); } usleep_range(100, 200); } @@ -2852,7 +2686,6 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; - int ret; if (!card->pcie.can_dump_fw) return; @@ -2906,12 +2739,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (memory_size == 0) { mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); - ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, - creg->fw_dump_read_done); - if (ret) { - mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); - return; - } + mwifiex_write_reg(adapter, creg->fw_dump_ctrl, + creg->fw_dump_read_done); break; } @@ -3197,9 +3026,7 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); - if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - mwifiex_dbg(adapter, ERROR, - "Failed to write driver not-ready signature\n"); + mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000); } pci_disable_device(pdev); @@ -3404,8 +3231,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct pci_dev *pdev = card->dev; - if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); + mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000); pci_clear_master(pdev); @@ -3423,7 +3249,7 @@ static struct mwifiex_if_ops pcie_ops = { .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, .enable_int = mwifiex_pcie_enable_host_int, - .disable_int = mwifiex_pcie_disable_host_int_noerr, + .disable_int = mwifiex_pcie_disable_host_int, .process_int_status = mwifiex_process_int_status, .host_to_card = mwifiex_pcie_host_to_card, .wakeup = mwifiex_pm_wakeup_card, @@ -3449,3 +3275,9 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8897_A0_FW_NAME); +MODULE_FIRMWARE(PCIE8897_B0_FW_NAME); +MODULE_FIRMWARE(PCIEUART8997_FW_NAME_V4); +MODULE_FIRMWARE(PCIEUSB8997_FW_NAME_V4); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index e66ba0d156..75f53c2f1e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ 
b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2574,20 +2574,11 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) if (!card->mp_regs) return -ENOMEM; - /* Allocate skb pointer buffers */ - card->mpa_rx.skb_arr = kcalloc(card->mp_agg_pkt_limit, sizeof(void *), - GFP_KERNEL); - if (!card->mpa_rx.skb_arr) { - kfree(card->mp_regs); - return -ENOMEM; - } - card->mpa_rx.len_arr = kcalloc(card->mp_agg_pkt_limit, sizeof(*card->mpa_rx.len_arr), GFP_KERNEL); if (!card->mpa_rx.len_arr) { kfree(card->mp_regs); - kfree(card->mpa_rx.skb_arr); return -ENOMEM; } @@ -2642,7 +2633,6 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) cancel_work_sync(&card->work); kfree(card->mp_regs); - kfree(card->mpa_rx.skb_arr); kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index a5112cb35c..cb63ad55d6 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -164,10 +164,7 @@ struct mwifiex_sdio_mpa_rx { u32 pkt_cnt; u32 ports; u16 start_port; - - struct sk_buff **skb_arr; u32 *len_arr; - u8 enabled; u32 buf_size; u32 pkt_aggr_limit; @@ -374,7 +371,6 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, else card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); } - card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL; card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len; card->mpa_rx.pkt_cnt++; } diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 7eb1b0b63d..a86f800b8b 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -44,3 +44,4 @@ source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt7925/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 85c4799be9..d6575fe18c 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -44,3 +44,4 @@ obj-$(CONFIG_MT7615_COMMON) += mt7615/ obj-$(CONFIG_MT7915E) += mt7915/ obj-$(CONFIG_MT7921_COMMON) += mt7921/ obj-$(CONFIG_MT7996E) += mt7996/ +obj-$(CONFIG_MT7925_COMMON) += mt7925/ diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index 57fbcc83e0..ae83be572b 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -109,8 +109,6 @@ mt76_register_debugfs_fops(struct mt76_phy *phy, struct dentry *dir; dir = debugfs_create_dir("mt76", phy->hw->wiphy->debugfsdir); - if (!dir) - return NULL; debugfs_create_u8("led_pin", 0600, dir, &phy->leds.pin); debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index cd04865970..68ad915203 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -53,6 +53,11 @@ mt76_alloc_txwi(struct mt76_dev *dev) addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { + kfree(txwi); + return NULL; + } + t = (struct mt76_txwi_cache *)(txwi + 
dev->drv->txwi_size); t->dma_addr = addr; @@ -734,16 +739,18 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) if (!q->ndesc) return; - spin_lock_bh(&q->lock); - do { + spin_lock_bh(&q->lock); buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); + spin_unlock_bh(&q->lock); + if (!buf) break; mt76_put_page_pool_buf(buf, false); } while (1); + spin_lock_bh(&q->lock); if (q->rx_head) { dev_kfree_skb(q->rx_head); q->rx_head = NULL; diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 1de3c734e1..50820fe00b 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -188,7 +188,7 @@ static bool mt76_string_prop_find(struct property *prop, const char *str) return false; } -static struct device_node * +struct device_node * mt76_find_power_limits_node(struct mt76_dev *dev) { struct device_node *np = dev->dev->of_node; @@ -227,6 +227,7 @@ mt76_find_power_limits_node(struct mt76_dev *dev) of_node_put(np); return fallback; } +EXPORT_SYMBOL_GPL(mt76_find_power_limits_node); static const __be32 * mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) @@ -241,7 +242,7 @@ mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) return prop->value; } -static struct device_node * +struct device_node * mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) { struct device_node *cur; @@ -265,6 +266,8 @@ mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) return NULL; } +EXPORT_SYMBOL_GPL(mt76_find_channel_node); + static s8 mt76_get_txs_delta(struct device_node *np, u8 nss) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index dbab400969..51a767121b 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -415,6 +415,9 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) struct mt76_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; + INIT_LIST_HEAD(&phy->tx_list); + spin_lock_init(&phy->tx_lock); + SET_IEEE80211_DEV(hw, dev->dev); SET_IEEE80211_PERM_ADDR(hw, phy->macaddr); @@ -452,7 +455,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { + if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) && + hw->max_tx_fragments > 1) { ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); } @@ -566,7 +570,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q) { struct page_pool_params pp_params = { .order = 0, - .flags = PP_FLAG_PAGE_FRAG, + .flags = 0, .nid = NUMA_NO_NODE, .dev = dev->dma_dev, }; @@ -688,6 +692,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, int ret; dev_set_drvdata(dev->dev, dev); + mt76_wcid_init(&dev->global_wcid); ret = mt76_phy_init(phy, hw); if (ret) return ret; @@ -743,6 +748,7 @@ void mt76_unregister_device(struct mt76_dev *dev) if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(&dev->phy); mt76_tx_status_check(dev, true); + mt76_wcid_cleanup(dev, &dev->global_wcid); ieee80211_unregister_hw(hw); } EXPORT_SYMBOL_GPL(mt76_unregister_device); @@ -1411,7 +1417,7 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif, wcid->phy_idx = phy->band_idx; rcu_assign_pointer(dev->wcid[wcid->idx], wcid); - mt76_packet_id_init(wcid); + 
mt76_wcid_init(wcid); out: mutex_unlock(&dev->mutex); @@ -1430,7 +1436,7 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); - mt76_packet_id_flush(dev, wcid); + mt76_wcid_cleanup(dev, wcid); mt76_wcid_mask_clear(dev->wcid_mask, idx); mt76_wcid_mask_clear(dev->wcid_phy_mask, idx); @@ -1486,6 +1492,47 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove); +void mt76_wcid_init(struct mt76_wcid *wcid) +{ + INIT_LIST_HEAD(&wcid->tx_list); + skb_queue_head_init(&wcid->tx_pending); + + INIT_LIST_HEAD(&wcid->list); + idr_init(&wcid->pktid); +} +EXPORT_SYMBOL_GPL(mt76_wcid_init); + +void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) +{ + struct mt76_phy *phy = dev->phys[wcid->phy_idx]; + struct ieee80211_hw *hw; + struct sk_buff_head list; + struct sk_buff *skb; + + mt76_tx_status_lock(dev, &list); + mt76_tx_status_skb_get(dev, wcid, -1, &list); + mt76_tx_status_unlock(dev, &list); + + idr_destroy(&wcid->pktid); + + spin_lock_bh(&phy->tx_lock); + + if (!list_empty(&wcid->tx_list)) + list_del_init(&wcid->tx_list); + + spin_lock(&wcid->tx_pending.lock); + skb_queue_splice_tail_init(&wcid->tx_pending, &list); + spin_unlock(&wcid->tx_pending.lock); + + spin_unlock_bh(&phy->tx_lock); + + while ((skb = __skb_dequeue(&list)) != NULL) { + hw = mt76_tx_status_get_hw(dev, skb); + ieee80211_free_txskb(hw, skb); + } +} +EXPORT_SYMBOL_GPL(mt76_wcid_cleanup); + int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 7f44736ca2..a17b2fbd69 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -334,6 +334,9 @@ struct mt76_wcid { u32 tx_info; bool sw_iv; + struct list_head tx_list; + struct sk_buff_head tx_pending; + struct list_head list; struct idr pktid; @@ -376,7 +379,7 @@ struct mt76_rx_tid { u8 started:1, stopped:1, timer_pending:1; - struct sk_buff *reorder_buf[]; + struct sk_buff *reorder_buf[] __counted_by(size); }; #define MT_TX_CB_DMA_DONE BIT(0) @@ -719,6 +722,8 @@ struct mt76_phy { unsigned long state; u8 band_idx; + spinlock_t tx_lock; + struct list_head tx_list; struct mt76_queue *q_tx[__MT_TXQ_MAX]; struct cfg80211_chan_def chandef; @@ -967,6 +972,7 @@ struct mt76_power_limits { s8 ofdm[8]; s8 mcs[4][10]; s8 ru[7][12]; + s8 eht[16][16]; }; struct mt76_ethtool_worker_info { @@ -1530,6 +1536,11 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set); +struct device_node * +mt76_find_power_limits_node(struct mt76_dev *dev); +struct device_node * +mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan); + s8 mt76_get_rate_power_limits(struct mt76_phy *phy, struct ieee80211_channel *chan, struct mt76_power_limits *dest, @@ -1599,22 +1610,7 @@ mt76_token_put(struct mt76_dev *dev, int token) return txwi; } -static inline void mt76_packet_id_init(struct mt76_wcid *wcid) -{ - INIT_LIST_HEAD(&wcid->list); - idr_init(&wcid->pktid); -} - -static inline void -mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid) -{ - struct sk_buff_head list; - - mt76_tx_status_lock(dev, &list); - mt76_tx_status_skb_get(dev, wcid, -1, &list); - mt76_tx_status_unlock(dev, &list); - - idr_destroy(&wcid->pktid); -} +void mt76_wcid_init(struct 
mt76_wcid *wcid); +void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 0762de3ce5..6c55c72f28 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -184,6 +184,13 @@ mt7603_mac_init(struct mt7603_dev *dev) mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE); + if (is_mt7628(dev)) { + mt76_set(dev, MT_TMAC_TCR, + MT_TMAC_TCR_TXOP_BURST_STOP | BIT(1) | BIT(0)); + mt76_set(dev, MT_TXREQ, BIT(27)); + mt76_set(dev, MT_AGG_TMP, GENMASK(4, 2)); + } + mt7603_set_tmac_template(dev); /* Enable RX group to HIF */ @@ -517,6 +524,7 @@ int mt7603_register_device(struct mt7603_dev *dev) hw->max_rates = 3; hw->max_report_rates = 7; hw->max_rate_tries = 11; + hw->max_tx_fragments = 1; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index c213fd2a52..89d738deea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -70,7 +70,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mvif->sta.wcid.idx = idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.vif = mvif; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); eth_broadcast_addr(bc_addr); mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); @@ -110,7 +110,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); mutex_unlock(&dev->mt76.mutex); - mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); } void mt7603_init_edcca(struct mt7603_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 18a50ccff1..f7722f67db 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -58,10 +58,7 @@ int mt7615_thermal_init(struct mt7615_dev *dev) wiphy_name(wiphy)); hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); - if (IS_ERR(hwmon)) - return PTR_ERR(hwmon); - - return 0; + return PTR_ERR_OR_ZERO(hwmon); } EXPORT_SYMBOL_GPL(mt7615_thermal_init); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 200b1752ca..dab16b5fc3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -226,7 +226,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mvif->sta.wcid.idx = idx; mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -279,7 +279,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, list_del_init(&msta->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); } int mt7615_set_channel(struct mt7615_phy *phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 22878f0888..1f29d8cd90 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ 
b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -172,6 +172,11 @@ struct mt76_connac_tx_free { extern const struct wiphy_wowlan_support mt76_connac_wowlan_support; +static inline bool is_mt7925(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7925; +} + static inline bool is_mt7922(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7922; @@ -245,6 +250,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev) switch (mt76_chip(dev)) { case 0x7961: case 0x7922: + case 0x7925: case 0x7663: case 0x7622: return false; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 87bfa441a9..2250252b20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -271,7 +271,7 @@ enum tx_mgnt_type { #define MT_TXFREE0_MSDU_CNT GENMASK(25, 16) #define MT_TXFREE0_RX_BYTE GENMASK(15, 0) -#define MT_TXFREE1_VER GENMASK(18, 16) +#define MT_TXFREE1_VER GENMASK(19, 16) #define MT_TXFREE_INFO_PAIR BIT(31) #define MT_TXFREE_INFO_HEADER BIT(30) @@ -317,6 +317,7 @@ enum tx_mgnt_type { #define MT_TXS4_TIMESTAMP GENMASK(31, 0) +/* MPDU based TXS */ #define MT_TXS5_F0_FINAL_MPDU BIT(31) #define MT_TXS5_F0_QOS BIT(30) #define MT_TXS5_F0_TX_COUNT GENMASK(29, 25) @@ -338,4 +339,17 @@ enum tx_mgnt_type { #define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24) #define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0) +/* PPDU based TXS */ +#define MT_TXS5_MPDU_TX_CNT GENMASK(30, 20) +#define MT_TXS5_MPDU_TX_BYTE_SCALE BIT(15) +#define MT_TXS5_MPDU_TX_BYTE GENMASK(14, 0) + +#define MT_TXS6_MPDU_FAIL_CNT GENMASK(30, 20) +#define MT_TXS6_MPDU_FAIL_BYTE_SCALE BIT(15) +#define MT_TXS6_MPDU_FAIL_BYTE GENMASK(14, 0) + +#define MT_TXS7_MPDU_RETRY_CNT GENMASK(30, 20) +#define MT_TXS7_MPDU_RETRY_BYTE_SCALE BIT(15) +#define MT_TXS7_MPDU_RETRY_BYTE GENMASK(14, 0) + #endif /* __MT76_CONNAC3_MAC_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index 87479c6c2b..93402d2c25 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -170,7 +170,7 @@ void mt76_connac_write_hw_txp(struct mt76_dev *dev, txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); - if (is_mt7663(dev) || is_mt7921(dev)) + if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) last_mask = MT_TXD_LEN_LAST; else last_mask = MT_TXD_LEN_AMSDU_LAST | @@ -214,7 +214,7 @@ mt76_connac_txp_skb_unmap_hw(struct mt76_dev *dev, u32 last_mask; int i; - if (is_mt7663(dev) || is_mt7921(dev)) + if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) last_mask = MT_TXD_LEN_LAST; else last_mask = MT_TXD_LEN_MSDU_LAST; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 8274a57e1f..b475555097 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -66,6 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) || (is_mt7921(dev) && addr == 0x900000) || + (is_mt7925(dev) && addr == 0x900000) || (is_mt7996(dev) && addr == 0x900000)) cmd = MCU_CMD(PATCH_START_REQ); else @@ -745,7 +746,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) he->pkt_ext = 2; } -static void +void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta) { 
struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; @@ -777,20 +778,23 @@ mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta) he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US; } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2); -static u8 +u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, enum nl80211_band band, struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; u8 mode = 0; if (sta) { ht_cap = &sta->deflink.ht_cap; vht_cap = &sta->deflink.vht_cap; he_cap = &sta->deflink.he_cap; + eht_cap = &sta->deflink.eht_cap; } else { struct ieee80211_supported_band *sband; @@ -798,6 +802,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, ht_cap = &sband->ht_cap; vht_cap = &sband->vht_cap; he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type); } if (band == NL80211_BAND_2GHZ) { @@ -808,6 +813,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_TYPE_BIT_BE; } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { mode |= PHY_TYPE_BIT_OFDM; @@ -819,10 +827,14 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_TYPE_BIT_BE; } return mode; } +EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_v2); void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, @@ -1347,7 +1359,7 @@ u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, sband = phy->hw->wiphy->bands[band]; eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type); - if (!eht_cap || !eht_cap->has_eht) + if (!eht_cap || !eht_cap->has_eht || !vif->bss_conf.eht_support) return mode; switch (band) { @@ -1929,126 +1941,6 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event); -static void mt76_connac_mcu_parse_tx_resource(struct mt76_dev *dev, - struct sk_buff *skb) -{ - struct mt76_sdio *sdio = &dev->sdio; - struct mt76_connac_tx_resource { - __le32 version; - __le32 pse_data_quota; - __le32 pse_mcu_quota; - __le32 ple_data_quota; - __le32 ple_mcu_quota; - __le16 pse_page_size; - __le16 ple_page_size; - u8 pp_padding; - u8 pad[3]; - } __packed * tx_res; - - tx_res = (struct mt76_connac_tx_resource *)skb->data; - sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota); - sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota); - sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota); - sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size); - sdio->sched.deficit = tx_res->pp_padding; -} - -static void mt76_connac_mcu_parse_phy_cap(struct mt76_dev *dev, - struct sk_buff *skb) -{ - struct mt76_connac_phy_cap { - u8 ht; - u8 vht; - u8 _5g; - u8 max_bw; - u8 nss; - u8 dbdc; - u8 tx_ldpc; - u8 rx_ldpc; - u8 tx_stbc; - u8 rx_stbc; - u8 hw_path; - u8 he; - } __packed * cap; - - enum { - WF0_24G, - WF0_5G - }; - - cap = (struct mt76_connac_phy_cap *)skb->data; - - dev->phy.antenna_mask = BIT(cap->nss) - 1; - dev->phy.chainmask = dev->phy.antenna_mask; - dev->phy.cap.has_2ghz = cap->hw_path & 
BIT(WF0_24G); - dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); -} - -int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy) -{ - struct mt76_connac_cap_hdr { - __le16 n_element; - u8 rsv[2]; - } __packed * hdr; - struct sk_buff *skb; - int ret, i; - - ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB), - NULL, 0, true, &skb); - if (ret) - return ret; - - hdr = (struct mt76_connac_cap_hdr *)skb->data; - if (skb->len < sizeof(*hdr)) { - ret = -EINVAL; - goto out; - } - - skb_pull(skb, sizeof(*hdr)); - - for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { - struct tlv_hdr { - __le32 type; - __le32 len; - } __packed * tlv = (struct tlv_hdr *)skb->data; - int len; - - if (skb->len < sizeof(*tlv)) - break; - - skb_pull(skb, sizeof(*tlv)); - - len = le32_to_cpu(tlv->len); - if (skb->len < len) - break; - - switch (le32_to_cpu(tlv->type)) { - case MT_NIC_CAP_6G: - phy->cap.has_6ghz = skb->data[0]; - break; - case MT_NIC_CAP_MAC_ADDR: - memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN); - break; - case MT_NIC_CAP_PHY: - mt76_connac_mcu_parse_phy_cap(phy->dev, skb); - break; - case MT_NIC_CAP_TX_RESOURCE: - if (mt76_is_sdio(phy->dev)) - mt76_connac_mcu_parse_tx_resource(phy->dev, - skb); - break; - default: - break; - } - skb_pull(skb, len); - } -out: - dev_kfree_skb(skb); - - return ret; -} -EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability); - static void mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, struct mt76_power_limits *limits, @@ -2092,9 +1984,9 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, } } -static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, - struct ieee80211_channel *chan, - s8 target_power) +s8 mt76_connac_get_ch_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power) { struct mt76_dev *dev = phy->dev; struct ieee80211_supported_band *sband; @@ -2131,6 +2023,7 @@ static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, return target_power; } +EXPORT_SYMBOL_GPL(mt76_connac_get_ch_power); static int mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, @@ -2149,7 +2042,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 112, 114, 116, 118, 120, 122, 124, 126, 128, 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 165 + 159, 161, 165, 169, 173, 177 }; static const u8 chan_list_6ghz[] = { 1, 3, 5, 7, 9, 11, 13, @@ -2169,11 +2062,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 209, 211, 213, 215, 217, 219, 221, 225, 227, 229, 233 }; - int i, n_chan, batch_size, idx = 0, tx_power, last_ch; + int i, n_chan, batch_size, idx = 0, tx_power, last_ch, err = 0; struct mt76_connac_sku_tlv sku_tlbv; - struct mt76_power_limits limits; + struct mt76_power_limits *limits; const u8 *ch_list; + limits = devm_kmalloc(dev->dev, sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; tx_power = 2 * phy->hw->conf.power_level; if (!tx_power) @@ -2200,14 +2097,16 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, for (i = 0; i < batch_size; i++) { struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {}; - int j, err, msg_len, num_ch; + int j, msg_len, num_ch; struct sk_buff *skb; num_ch = i == batch_size - 1 ? 
n_chan % batch_len : batch_len; msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv); skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto out; + } skb_reserve(skb, sizeof(tx_power_tlv)); @@ -2238,14 +2137,14 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, tx_power); sar_power = mt76_get_sar_power(phy, &chan, reg_power); - mt76_get_rate_power_limits(phy, &chan, &limits, + mt76_get_rate_power_limits(phy, &chan, limits, sar_power); tx_power_tlv.last_msg = ch_list[idx] == last_ch; sku_tlbv.channel = ch_list[idx]; mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit, - &limits, band); + limits, band); skb_put_data(skb, &sku_tlbv, sku_len); } __skb_push(skb, sizeof(tx_power_tlv)); @@ -2255,10 +2154,12 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, MCU_CE_CMD(SET_RATE_TX_POWER), false); if (err < 0) - return err; + goto out; } - return 0; +out: + devm_kfree(dev->dev, limits); + return err; } int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy) @@ -2462,7 +2363,7 @@ mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif, sizeof(req), true); } -static int +int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, bool suspend) { @@ -2487,8 +2388,9 @@ mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_gtk_rekey); -static int +int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, struct ieee80211_vif *vif, bool enable, u8 mdtim, @@ -2517,6 +2419,7 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_mode); static int mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, @@ -2552,7 +2455,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true); } -static int +int mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, bool suspend, struct cfg80211_wowlan *wowlan) { @@ -2604,6 +2507,7 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl); int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) { @@ -3069,7 +2973,7 @@ static u32 mt76_connac2_get_data_mode(struct mt76_dev *dev, u32 info) { u32 mode = DL_MODE_NEED_RSP; - if (!is_mt7921(dev) || info == PATCH_SEC_NOT_SUPPORT) + if ((!is_mt7921(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT) return mode; switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 4543e5bf04..0563b1b22f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -191,6 +191,7 @@ struct mt76_connac2_fw_region { struct tlv { __le16 tag; __le16 len; + u8 data[]; } __packed; struct bss_info_omac { @@ -795,6 +796,7 @@ enum { STA_REC_PHY = 0x15, STA_REC_HE_6G = 0x17, STA_REC_HE_V2 = 0x19, + STA_REC_MLD = 0x20, STA_REC_EHT = 0x22, STA_REC_HDRT = 0x28, STA_REC_HDR_TRANS = 0x2B, @@ -919,6 +921,7 @@ enum { PHY_TYPE_HT_INDEX, PHY_TYPE_VHT_INDEX, PHY_TYPE_HE_INDEX, + PHY_TYPE_BE_INDEX, PHY_TYPE_INDEX_NUM }; @@ -928,6 
+931,7 @@ enum { #define PHY_TYPE_BIT_HT BIT(PHY_TYPE_HT_INDEX) #define PHY_TYPE_BIT_VHT BIT(PHY_TYPE_VHT_INDEX) #define PHY_TYPE_BIT_HE BIT(PHY_TYPE_HE_INDEX) +#define PHY_TYPE_BIT_BE BIT(PHY_TYPE_BE_INDEX) #define MT_WTBL_RATE_TX_MODE GENMASK(9, 6) #define MT_WTBL_RATE_MCS GENMASK(5, 0) @@ -1009,8 +1013,17 @@ enum { enum { MCU_UNI_EVENT_RESULT = 0x01, MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04, + MCU_UNI_EVENT_ACCESS_REG = 0x6, MCU_UNI_EVENT_IE_COUNTDOWN = 0x09, + MCU_UNI_EVENT_COREDUMP = 0x0a, + MCU_UNI_EVENT_BSS_BEACON_LOSS = 0x0c, + MCU_UNI_EVENT_SCAN_DONE = 0x0e, MCU_UNI_EVENT_RDD_REPORT = 0x11, + MCU_UNI_EVENT_ROC = 0x27, + MCU_UNI_EVENT_TX_DONE = 0x2d, + MCU_UNI_EVENT_NIC_CAPAB = 0x43, + MCU_UNI_EVENT_PER_STA_INFO = 0x6d, + MCU_UNI_EVENT_ALL_STA_INFO = 0x6e, }; #define MCU_UNI_CMD_EVENT BIT(1) @@ -1209,12 +1222,17 @@ enum { MCU_UNI_CMD_RX_HDR_TRANS = 0x12, MCU_UNI_CMD_SER = 0x13, MCU_UNI_CMD_TWT = 0x14, + MCU_UNI_CMD_SET_DOMAIN_INFO = 0x15, + MCU_UNI_CMD_SCAN_REQ = 0x16, MCU_UNI_CMD_RDD_CTRL = 0x19, MCU_UNI_CMD_GET_MIB_INFO = 0x22, + MCU_UNI_CMD_GET_STAT_INFO = 0x23, MCU_UNI_CMD_SNIFFER = 0x24, MCU_UNI_CMD_SR = 0x25, MCU_UNI_CMD_ROC = 0x27, + MCU_UNI_CMD_SET_DBDC_PARMS = 0x28, MCU_UNI_CMD_TXPOWER = 0x2b, + MCU_UNI_CMD_SET_POWER_LIMIT = 0x2c, MCU_UNI_CMD_EFUSE_CTRL = 0x2d, MCU_UNI_CMD_RA = 0x2f, MCU_UNI_CMD_MURU = 0x31, @@ -1224,6 +1242,8 @@ enum { MCU_UNI_CMD_VOW = 0x37, MCU_UNI_CMD_RRO = 0x57, MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58, + MCU_UNI_CMD_PER_STA_INFO = 0x6d, + MCU_UNI_CMD_ALL_STA_INFO = 0x6e, MCU_UNI_CMD_ASSERT_DUMP = 0x6f, }; @@ -1279,6 +1299,7 @@ enum { UNI_BSS_INFO_RLM = 2, UNI_BSS_INFO_BSS_COLOR = 4, UNI_BSS_INFO_HE_BASIC = 5, + UNI_BSS_INFO_11V_MBSSID = 6, UNI_BSS_INFO_BCN_CONTENT = 7, UNI_BSS_INFO_BCN_CSA = 8, UNI_BSS_INFO_BCN_BCC = 9, @@ -1293,6 +1314,7 @@ enum { UNI_BSS_INFO_IFS_TIME = 23, UNI_BSS_INFO_OFFLOAD = 25, UNI_BSS_INFO_MLD = 26, + UNI_BSS_INFO_PM_DISABLE = 27, }; enum { @@ -1302,6 +1324,17 @@ enum { UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT, }; +enum UNI_ALL_STA_INFO_TAG { + UNI_ALL_STA_TX_RATE, + UNI_ALL_STA_TX_STAT, + UNI_ALL_STA_TXRX_ADM_STAT, + UNI_ALL_STA_TXRX_AIR_TIME, + UNI_ALL_STA_DATA_TX_RETRY_COUNT, + UNI_ALL_STA_GI_MODE, + UNI_ALL_STA_TXRX_MSDU_COUNT, + UNI_ALL_STA_MAX_NUM +}; + enum { MT_NIC_CAP_TX_RESOURCE, MT_NIC_CAP_TX_EFUSE_ADDR, @@ -1322,6 +1355,7 @@ enum { MT_NIC_CAP_ANTSWP = 0x16, MT_NIC_CAP_WFDMA_REALLOC, MT_NIC_CAP_6G, + MT_NIC_CAP_CHIP_CAP = 0x20, }; #define UNI_WOW_DETECT_TYPE_MAGIC BIT(0) @@ -1549,6 +1583,15 @@ struct bss_info_uni_he { u8 rsv[2]; } __packed; +struct bss_info_uni_mbssid { + __le16 tag; + __le16 len; + u8 max_indicator; + u8 mbss_idx; + u8 tx_bss_omac_idx; + u8 rsv; +} __packed; + struct mt76_connac_gtk_rekey_tlv { __le16 tag; __le16 len; @@ -1739,7 +1782,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa) ret |= feature_set & FW_FEATURE_SET_ENCRYPT ? DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0; - if (is_mt7921(dev)) + if (is_mt7921(dev) || is_mt7925(dev)) ret |= feature_set & FW_FEATURE_ENCRY_MODE ? 
DL_CONFIG_ENCRY_MODE_SEL : 0; ret |= FIELD_PREP(DL_MODE_KEY_IDX, @@ -1807,6 +1850,9 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct mt76_wcid *wcid, int cmd); +void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta); +u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta); int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -1851,7 +1897,6 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, int mt76_connac_mcu_start_patch(struct mt76_dev *dev); int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get); int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option); -int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy); int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req); @@ -1866,9 +1911,17 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy, int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, struct mt76_vif *vif, struct ieee80211_bss_conf *info); +int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, + bool suspend); +int mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan); int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *key); +int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, + struct ieee80211_vif *vif, + bool enable, u8 mdtim, + bool wow_suspend); int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend); void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); @@ -1879,6 +1932,9 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev); int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable); void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_connac_coredump *coredump); +s8 mt76_connac_get_ch_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power); int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy); int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c index ad4dc8e17b..d570b99bcc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c @@ -136,7 +136,8 @@ EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer); void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt76x02_dev *dev = (struct mt76x02_dev *)priv; + struct beacon_bc_data *data = priv; + struct mt76x02_dev *dev = data->dev; struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct sk_buff *skb = NULL; @@ -147,7 +148,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!skb) return; - mt76x02_mac_set_beacon(dev, skb); + __skb_queue_tail(&data->q, skb); } EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter); @@ -182,9 +183,6 @@ mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, { int i, nframes; - data->dev = dev; - __skb_queue_head_init(&data->q); - do { nframes = skb_queue_len(&data->q); 
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index e9c5e85ec0..9b5e3fb7b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -16,13 +16,17 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); struct mt76_dev *mdev = &dev->mt76; struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD]; - struct beacon_bc_data data = {}; + struct beacon_bc_data data = { + .dev = dev, + }; struct sk_buff *skb; int i; if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; + __skb_queue_head_init(&data.q); + mt76x02_resync_beacon_timer(dev); /* Prevent corrupt transmissions during update */ @@ -31,7 +35,10 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x02_update_beacon_iter, dev); + mt76x02_update_beacon_iter, &data); + + while ((skb = __skb_dequeue(&data.q)) != NULL) + mt76x02_mac_set_beacon(dev, skb); mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~(0xff00 >> dev->beacon_data_count)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 2c6c03809b..85a78dea40 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -182,7 +182,9 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work) { struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, pre_tbtt_work); - struct beacon_bc_data data = {}; + struct beacon_bc_data data = { + .dev = dev, + }; struct sk_buff *skb; int nbeacons; @@ -192,15 +194,20 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work) if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; + __skb_queue_head_init(&data.q); + mt76x02_resync_beacon_timer(dev); /* Prevent corrupt transmissions during update */ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); dev->beacon_data_count = 0; - ieee80211_iterate_active_interfaces(mt76_hw(dev), + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x02_update_beacon_iter, dev); + mt76x02_update_beacon_iter, &data); + + while ((skb = __skb_dequeue(&data.q)) != NULL) + mt76x02_mac_set_beacon(dev, skb); mt76_csa_check(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index dcbb5c605d..8a0e8124b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -288,7 +288,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; - mt76_packet_id_init(&mvif->group_wcid); + mt76_wcid_init(&mvif->group_wcid); mtxq = (struct mt76_txq *)vif->txq->drv_priv; rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid); @@ -346,7 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL); - mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->group_wcid); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c 
b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 35fdf4f98d..81478289f1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -213,10 +213,7 @@ static int mt7915_thermal_init(struct mt7915_phy *phy) hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7915_hwmon_groups); - if (IS_ERR(hwmon)) - return PTR_ERR(hwmon); - - return 0; + return PTR_ERR_OR_ZERO(hwmon); } static void mt7915_led_set_config(struct led_classdev *led_cdev, @@ -347,6 +344,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy) hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; hw->netdev_features = NETIF_F_RXCSUM; + if (mtk_wed_device_active(&mdev->mmio.wed)) + hw->netdev_features |= NETIF_F_HW_TC; + hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; @@ -393,8 +393,12 @@ mt7915_init_wiphy(struct mt7915_phy *phy) phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; - phy->mt76->sband_2g.sband.ht_cap.ampdu_density = - IEEE80211_HT_MPDU_DENSITY_4; + if (is_mt7915(&dev->mt76)) + phy->mt76->sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_4; + else + phy->mt76->sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; } if (phy->mt76->cap.has_5ghz) { @@ -404,10 +408,11 @@ mt7915_init_wiphy(struct mt7915_phy *phy) phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; - phy->mt76->sband_5g.sband.ht_cap.ampdu_density = - IEEE80211_HT_MPDU_DENSITY_4; if (is_mt7915(&dev->mt76)) { + phy->mt76->sband_5g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_4; + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; @@ -417,6 +422,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy) IEEE80211_VHT_CAP_SHORT_GI_160 | FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1); } else { + phy->mt76->sband_5g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; @@ -1127,8 +1135,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data); band = &phy->mt76->sband_2g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_5ghz) { @@ -1136,8 +1143,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data); band = &phy->mt76->sband_5g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_6ghz) { @@ -1145,8 +1151,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data); band = &phy->mt76->sband_6g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 3196f56cdf..9d747eb8ab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -253,7 +253,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, mvif->sta.wcid.phy_idx = ext_phy; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); 
mt7915_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -314,7 +314,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, list_del_init(&msta->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &msta->wcid); + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } int mt7915_set_channel(struct mt7915_phy *phy) @@ -483,16 +483,22 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_MONITOR) { bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); bool band = phy->mt76->band_idx; + u32 rxfilter = phy->rxfilter; - if (!enabled) - phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; - else - phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + if (!enabled) { + rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + dev->monitor_mask &= ~BIT(band); + } else { + rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + dev->monitor_mask |= BIT(band); + } mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN, enabled); + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_MDP_DCR0_RX_HDR_TRANS_EN, + !dev->monitor_mask); mt76_testmode_reset(phy->mt76, true); - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(band), rxfilter); } mutex_unlock(&dev->mt76.mutex); @@ -527,6 +533,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR1_DROP_BA | MT_WF_RFCR1_DROP_CFEND | MT_WF_RFCR1_DROP_CFACK; + u32 rxfilter; u32 flags = 0; #define MT76_FILTER(_flag, _hw) do { \ @@ -561,7 +568,12 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR_DROP_NDPA); *total_flags = flags; - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + rxfilter = phy->rxfilter; + if (hw->conf.flags & IEEE80211_CONF_MONITOR) + rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + else + rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + mt76_wr(dev, MT_WF_RFCR(band), rxfilter); if (*total_flags & FIF_CONTROL) mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); @@ -1642,6 +1654,20 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw, return 0; } + +static int +mt7915_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; + + if (!mtk_wed_device_active(wed)) + return -EOPNOTSUPP; + + return mtk_wed_device_setup_tc(wed, netdev, type, type_data); +} #endif const struct ieee80211_ops mt7915_ops = { @@ -1696,5 +1722,6 @@ const struct ieee80211_ops mt7915_ops = { .set_radar_background = mt7915_set_radar_background, #ifdef CONFIG_NET_MEDIATEK_SOC_WED .net_fill_forward_path = mt7915_net_fill_forward_path, + .net_setup_tc = mt7915_net_setup_tc, #endif }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 5d8e985cd7..b22f06d441 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -225,8 +225,10 @@ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) static void mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (vif->bss_conf.csa_active) - ieee80211_csa_finish(vif); + if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION) + return; + + ieee80211_csa_finish(vif); } static void @@ -326,7 +328,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) static void mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if 
(!vif->bss_conf.color_change_active) + if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; ieee80211_color_change_finish(vif); @@ -906,6 +908,8 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb, HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); muru->ofdma_ul.uo_ra = HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); + muru->ofdma_ul.rx_ctrl_frame_to_mbss = + HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]); } static void diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index f4ad7219f9..04a49ef675 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -591,7 +591,7 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed) static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size) { - struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc; + struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc; struct mt76_txwi_cache *t = NULL; struct mt7915_dev *dev; struct mt76_queue *q; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 21984e9723..d317c523b2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -295,6 +295,8 @@ struct mt7915_dev { bool muru_debug; bool ibf; + u8 monitor_mask; + struct dentry *debugfs_dir; struct rchan *relay_fwlog; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index 588cd87e24..89ac8e6707 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -172,6 +172,7 @@ enum offs_rev { #define MT_MDP_DCR0 MT_MDP(0x000) #define MT_MDP_DCR0_DAMSDU_EN BIT(15) +#define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19) #define MT_MDP_DCR1 MT_MDP(0x004) #define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 37348b2087..06e3d9db99 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -1219,10 +1219,7 @@ static int mt798x_wmac_init(struct mt7915_dev *dev) return PTR_ERR(dev->sku); dev->rstc = devm_reset_control_get(pdev, "consys"); - if (IS_ERR(dev->rstc)) - return PTR_ERR(dev->rstc); - - return 0; + return PTR_ERR_OR_ZERO(dev->rstc); } static int mt798x_wmac_probe(struct platform_device *pdev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index ff63f37f67..48433c6d5e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -55,27 +55,91 @@ static int mt7921_thermal_init(struct mt792x_phy *phy) hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7921_hwmon_groups); - if (IS_ERR(hwmon)) - return PTR_ERR(hwmon); + return PTR_ERR_OR_ZERO(hwmon); +} - return 0; +static void +mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev) +{ +#define IS_UNII_INVALID(idx, sfreq, efreq) \ + (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq)) + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct device_node *np, *band_np; + struct ieee80211_channel *ch; + int i, cfreq; + + np = mt76_find_power_limits_node(mdev); + + sband = 
wiphy->bands[NL80211_BAND_5GHZ]; + band_np = np ? of_get_child_by_name(np, "txpower-5g") : NULL; + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + cfreq = ch->center_freq; + + if (np && (!band_np || !mt76_find_channel_node(band_np, ch))) { + ch->flags |= IEEE80211_CHAN_DISABLED; + continue; + } + + /* UNII-4 */ + if (IS_UNII_INVALID(0, 5850, 5925)) + ch->flags |= IEEE80211_CHAN_DISABLED; + } + + sband = wiphy->bands[NL80211_BAND_6GHZ]; + if (!sband) + return; + + band_np = np ? of_get_child_by_name(np, "txpower-6g") : NULL; + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + cfreq = ch->center_freq; + + if (np && (!band_np || !mt76_find_channel_node(band_np, ch))) { + ch->flags |= IEEE80211_CHAN_DISABLED; + continue; + } + + /* UNII-5/6/7/8 */ + if (IS_UNII_INVALID(1, 5925, 6425) || + IS_UNII_INVALID(2, 6425, 6525) || + IS_UNII_INVALID(3, 6525, 6875) || + IS_UNII_INVALID(4, 6875, 7125)) + ch->flags |= IEEE80211_CHAN_DISABLED; + } } +void mt7921_regd_update(struct mt792x_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + struct ieee80211_hw *hw = mdev->hw; + struct wiphy *wiphy = hw->wiphy; + + mt7921_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env); + mt7921_regd_channel_update(wiphy, dev); + mt76_connac_mcu_set_channel_domain(hw->priv); + mt7921_set_tx_sar_pwr(hw, NULL); +} +EXPORT_SYMBOL_GPL(mt7921_regd_update); + static void mt7921_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_connac_pm *pm = &dev->pm; memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); dev->mt76.region = request->dfs_region; dev->country_ie_env = request->country_ie_env; + if (pm->suspended) + return; + mt792x_mutex_acquire(dev); - mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env); - mt76_connac_mcu_set_channel_domain(hw->priv); - mt7921_set_tx_sar_pwr(hw, NULL); + mt7921_regd_update(dev); mt792x_mutex_release(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 21f9374542..867e14f6b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -794,7 +794,7 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); type = mt76_is_sdio(mdev) ? 
MT7921_SDIO_DATA : 0; - mt7921_skb_add_usb_sdio_hdr(dev, skb, type); + mt792x_skb_add_usb_sdio_hdr(dev, skb, type); pad = round_up(skb->len, 4) - skb->len; if (mt76_is_usb(mdev)) pad += 4; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index d8851cb5f4..0645417e05 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -196,8 +196,7 @@ void mt7921_set_stream_he_caps(struct mt792x_phy *phy) n = mt7921_init_he_caps(phy, NL80211_BAND_2GHZ, data); band = &phy->mt76->sband_2g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_5ghz) { @@ -205,16 +204,14 @@ void mt7921_set_stream_he_caps(struct mt792x_phy *phy) n = mt7921_init_he_caps(phy, NL80211_BAND_5GHZ, data); band = &phy->mt76->sband_5g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); if (phy->mt76->cap.has_6ghz) { data = phy->iftype[NL80211_BAND_6GHZ]; n = mt7921_init_he_caps(phy, NL80211_BAND_6GHZ, data); band = &phy->mt76->sband_6g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } } } @@ -262,25 +259,6 @@ static int mt7921_start(struct ieee80211_hw *hw) return err; } -void mt7921_stop(struct ieee80211_hw *hw) -{ - struct mt792x_dev *dev = mt792x_hw_dev(hw); - struct mt792x_phy *phy = mt792x_hw_phy(hw); - - cancel_delayed_work_sync(&phy->mt76->mac_work); - - cancel_delayed_work_sync(&dev->pm.ps_work); - cancel_work_sync(&dev->pm.wake_work); - cancel_work_sync(&dev->reset_work); - mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); - - mt792x_mutex_acquire(dev); - clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); - mt792x_mutex_release(dev); -} -EXPORT_SYMBOL_GPL(mt7921_stop); - static int mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -318,7 +296,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -704,6 +682,68 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, mt792x_mutex_release(dev); } +static void +mt7921_calc_vif_num(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + u32 *num = priv; + + if (!priv) + return; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + *num += 1; + break; + default: + break; + } +} + +static void +mt7921_regd_set_6ghz_power_type(struct ieee80211_vif *vif, bool is_add) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mvif->phy; + struct mt792x_dev *dev = phy->dev; + u32 valid_vif_num = 0; + + ieee80211_iterate_active_interfaces(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_calc_vif_num, &valid_vif_num); + + if (valid_vif_num > 1) { + phy->power_type = MT_AP_DEFAULT; + goto out; + } + + if (!is_add) + vif->bss_conf.power_type = IEEE80211_REG_UNSET_AP; + + switch (vif->bss_conf.power_type) { + case IEEE80211_REG_SP_AP: + phy->power_type = MT_AP_SP; + break; + case IEEE80211_REG_VLP_AP: + phy->power_type = MT_AP_VLP; + break; + 
case IEEE80211_REG_LPI_AP: + phy->power_type = MT_AP_LPI; + break; + case IEEE80211_REG_UNSET_AP: + phy->power_type = MT_AP_UNSET; + break; + default: + phy->power_type = MT_AP_DEFAULT; + break; + } + +out: + mt7921_mcu_set_clc(dev, dev->mt76.alpha2, dev->country_ie_env); +} + int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { @@ -739,6 +779,8 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (ret) return ret; + mt7921_regd_set_6ghz_power_type(vif, true); + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); return 0; @@ -799,6 +841,8 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, list_del_init(&msta->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); + mt7921_regd_set_6ghz_power_type(vif, false); + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } EXPORT_SYMBOL_GPL(mt7921_mac_sta_remove); @@ -1315,7 +1359,7 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw, const struct ieee80211_ops mt7921_ops = { .tx = mt792x_tx, .start = mt7921_start, - .stop = mt7921_stop, + .stop = mt792x_stop, .add_interface = mt7921_add_interface, .remove_interface = mt792x_remove_interface, .config = mt7921_config, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index d1b1b8f767..399d7ca6be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -375,6 +375,7 @@ static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name) int ret, i, len, offset = 0; u8 *clc_base = NULL, hw_encap = 0; + dev->phy.clc_chan_conf = 0xff; if (mt7921_disable_clc || mt76_is_usb(&dev->mt76)) return 0; @@ -448,6 +449,129 @@ out: return ret; } +static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev, + struct sk_buff *skb) +{ + struct mt76_sdio *sdio = &dev->sdio; + struct mt7921_tx_resource { + __le32 version; + __le32 pse_data_quota; + __le32 pse_mcu_quota; + __le32 ple_data_quota; + __le32 ple_mcu_quota; + __le16 pse_page_size; + __le16 ple_page_size; + u8 pp_padding; + u8 pad[3]; + } __packed * tx_res; + + tx_res = (struct mt7921_tx_resource *)skb->data; + sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota); + sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota); + sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota); + sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size); + sdio->sched.deficit = tx_res->pp_padding; +} + +static void mt7921_mcu_parse_phy_cap(struct mt76_dev *dev, + struct sk_buff *skb) +{ + struct mt7921_phy_cap { + u8 ht; + u8 vht; + u8 _5g; + u8 max_bw; + u8 nss; + u8 dbdc; + u8 tx_ldpc; + u8 rx_ldpc; + u8 tx_stbc; + u8 rx_stbc; + u8 hw_path; + u8 he; + } __packed * cap; + + enum { + WF0_24G, + WF0_5G + }; + + cap = (struct mt7921_phy_cap *)skb->data; + + dev->phy.antenna_mask = BIT(cap->nss) - 1; + dev->phy.chainmask = dev->phy.antenna_mask; + dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); + dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); +} + +static int mt7921_mcu_get_nic_capability(struct mt792x_phy *mphy) +{ + struct mt76_connac_cap_hdr { + __le16 n_element; + u8 rsv[2]; + } __packed * hdr; + struct sk_buff *skb; + struct mt76_phy *phy = mphy->mt76; + int ret, i; + + ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB), + NULL, 0, true, &skb); + if (ret) + return ret; + + hdr = (struct mt76_connac_cap_hdr *)skb->data; + if (skb->len < sizeof(*hdr)) { + ret = -EINVAL; + 
goto out; + } + + skb_pull(skb, sizeof(*hdr)); + + for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { + struct tlv_hdr { + __le32 type; + __le32 len; + } __packed * tlv = (struct tlv_hdr *)skb->data; + int len; + + if (skb->len < sizeof(*tlv)) + break; + + skb_pull(skb, sizeof(*tlv)); + + len = le32_to_cpu(tlv->len); + if (skb->len < len) + break; + + switch (le32_to_cpu(tlv->type)) { + case MT_NIC_CAP_6G: + phy->cap.has_6ghz = skb->data[0]; + break; + case MT_NIC_CAP_MAC_ADDR: + memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN); + break; + case MT_NIC_CAP_PHY: + mt7921_mcu_parse_phy_cap(phy->dev, skb); + break; + case MT_NIC_CAP_TX_RESOURCE: + if (mt76_is_sdio(phy->dev)) + mt7921_mcu_parse_tx_resource(phy->dev, + skb); + break; + case MT_NIC_CAP_CHIP_CAP: + memcpy(&mphy->chip_cap, (void *)skb->data, sizeof(u64)); + break; + default: + break; + } + skb_pull(skb, len); + } +out: + dev_kfree_skb(skb); + + return ret; +} + int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) { struct { @@ -469,7 +593,7 @@ int mt7921_run_firmware(struct mt792x_dev *dev) if (err) return err; - err = mt76_connac_mcu_get_nic_capability(&dev->mphy); + err = mt7921_mcu_get_nic_capability(&dev->phy); if (err) return err; @@ -1123,7 +1247,9 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, struct mt7921_clc *clc, u8 idx) { - struct sk_buff *skb; +#define CLC_CAP_EVT_EN BIT(0) +#define CLC_CAP_DTS_EN BIT(1) + struct sk_buff *skb, *ret_skb = NULL; struct { u8 ver; u8 pad0; @@ -1131,14 +1257,16 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, u8 idx; u8 env; u8 acpi_conf; - u8 pad1; + u8 cap; u8 alpha2[2]; u8 type[2]; - u8 rsvd[64]; + u8 env_6g; + u8 rsvd[63]; } __packed req = { .ver = 1, .idx = idx, .env = env_cap, + .env_6g = dev->phy.power_type, .acpi_conf = mt792x_acpi_get_flags(&dev->phy), }; int ret, valid_cnt = 0; @@ -1148,6 +1276,11 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, if (!clc) return 0; + if (dev->phy.chip_cap & MT792x_CHIP_CAP_CLC_EVT_EN) + req.cap |= CLC_CAP_EVT_EN; + if (mt76_find_power_limits_node(&dev->mt76)) + req.cap |= CLC_CAP_DTS_EN; + buf_len = le16_to_cpu(clc->len) - sizeof(*clc); pos = clc->data; while (buf_len > 16) { @@ -1172,10 +1305,21 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, return -ENOMEM; skb_put_data(skb, rule->data, len); - ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_CE_CMD(SET_CLC), false); + ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb, + MCU_CE_CMD(SET_CLC), + !!(req.cap & CLC_CAP_EVT_EN), + &ret_skb); if (ret < 0) return ret; + + if (ret_skb) { + struct mt7921_clc_info_tlv *info; + + info = (struct mt7921_clc_info_tlv *)(ret_skb->data + 4); + dev->phy.clc_chan_conf = info->chan_conf; + dev_kfree_skb(ret_skb); + } + valid_cnt++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index 9b0aa3b70f..f9a259ee6b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -99,4 +99,17 @@ struct mt7921_rftest_evt { __le32 param0; __le32 param1; } __packed; + +struct mt7921_clc_info_tlv { + __le16 tag; + __le16 len; + + u8 chan_conf; /* BIT(0) : Enable UNII-4 + * BIT(1) : Enable UNII-5 + * BIT(2) : Enable UNII-6 + * BIT(3) : Enable UNII-7 + * BIT(4) : Enable UNII-8 + */ + u8 rsv[63]; +} __packed; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 87dd06855f..5c4cc370e6 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -23,10 +23,8 @@ #define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM #define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1) -#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0) -#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16) - #define MCU_UNI_EVENT_ROC 0x27 +#define MCU_UNI_EVENT_CLC 0x80 enum { UNI_ROC_ACQUIRE, @@ -235,20 +233,7 @@ mt7921_l1_rmw(struct mt792x_dev *dev, u32 addr, u32 mask, u32 val) #define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) #define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) -static inline void -mt7921_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb, - int type) -{ - u32 hdr, len; - - len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr); - hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) | - FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type); - - put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr))); -} - -void mt7921_stop(struct ieee80211_hw *hw); +void mt7921_regd_update(struct mt792x_dev *dev); int mt7921_mac_init(struct mt792x_dev *dev); bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index f04e7095e1..42fd456eb6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -507,6 +507,9 @@ static int mt7921_pci_resume(struct device *device) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); err = mt76_connac_mcu_set_hif_suspend(mdev, false); + + mt7921_regd_update(dev); + failed: pm->suspended = false; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c index 310eeca024..5e4501d7f1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c @@ -38,7 +38,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (cmd == MCU_CMD(FW_SCATTER)) type = MT7921_SDIO_FWDL; - mt7921_skb_add_usb_sdio_hdr(dev, skb, type); + mt792x_skb_add_usb_sdio_hdr(dev, skb, type); pad = round_up(skb->len, 4) - skb->len; __skb_put_zero(skb, pad); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index 59cd3d98bf..e5258c74fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -43,7 +43,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, else ep = MT_EP_OUT_AC_BE; - mt7921_skb_add_usb_sdio_hdr(dev, skb, 0); + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); pad = round_up(skb->len, 4) + 4 - skb->len; __skb_put_zero(skb, pad); @@ -135,14 +135,6 @@ out: return err; } -static void mt7921u_stop(struct ieee80211_hw *hw) -{ - struct mt792x_dev *dev = mt792x_hw_dev(hw); - - mt76u_stop_tx(&dev->mt76); - mt7921_stop(hw); -} - static int mt7921u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { @@ -189,7 +181,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf, if (!ops) return -ENOMEM; - ops->stop = mt7921u_stop; + ops->stop = mt792xu_stop; mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); if (!mdev) return -ENOMEM; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig 
new file mode 100644 index 0000000000..5854e95e68 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: ISC +config MT7925_COMMON + tristate + select MT792x_LIB + select WANT_DEV_COREDUMP + +config MT7925E + tristate "MediaTek MT7925E (PCIe) support" + select MT7925_COMMON + depends on MAC80211 + depends on PCI + help + This adds support for MT7925-based wireless PCIe devices, + which support operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be + 2x2:2SS 4096-QAM, 160MHz channels. + + To compile this driver as a module, choose M here. + +config MT7925U + tristate "MediaTek MT7925U (USB) support" + select MT792x_USB + select MT7925_COMMON + depends on MAC80211 + depends on USB + help + This adds support for MT7925-based wireless USB devices, + which support operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be + 2x2:2SS 4096-QAM, 160MHz channels. + + To compile this driver as a module, choose M here. diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile new file mode 100644 index 0000000000..d321e4ed73 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: ISC + +obj-$(CONFIG_MT7925_COMMON) += mt7925-common.o +obj-$(CONFIG_MT7925E) += mt7925e.o +obj-$(CONFIG_MT7925U) += mt7925u.o + +mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o +mt7925e-y := pci.o pci_mac.o pci_mcu.o +mt7925u-y := usb.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c new file mode 100644 index 0000000000..1e2fc6577e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include "mt7925.h" +#include "mcu.h" + +static int +mt7925_reg_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + u32 regval = val; + + mt792x_mutex_acquire(dev); + mt7925_mcu_regval(dev, dev->mt76.debugfs_reg, ®val, true); + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_reg_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + u32 regval; + int ret; + + mt792x_mutex_acquire(dev); + ret = mt7925_mcu_regval(dev, dev->mt76.debugfs_reg, ®val, false); + mt792x_mutex_release(dev); + if (!ret) + *val = regval; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7925_reg_get, mt7925_reg_set, + "0x%08llx\n"); +static int +mt7925_fw_debug_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + + mt792x_mutex_acquire(dev); + + dev->fw_debug = (u8)val; + mt7925_mcu_fw_log_2_host(dev, dev->fw_debug); + + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_fw_debug_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->fw_debug; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7925_fw_debug_get, + mt7925_fw_debug_set, "%lld\n"); + +DEFINE_SHOW_ATTRIBUTE(mt792x_tx_stats); + +static void +mt7925_seq_puts_array(struct seq_file *file, const char *str, + s8 val[][2], int len, u8 band_idx) +{ + int i; + + seq_printf(file, "%-22s:", str); + for (i = 0; i < len; i++) + if (val[i][band_idx] == 127) + seq_printf(file, " %6s", "N.A"); + else + seq_printf(file, " %6d", val[i][band_idx]); + seq_puts(file, "\n"); +} + +#define mt7925_print_txpwr_entry(prefix, rate, idx) \ +({ \ + mt7925_seq_puts_array(s, #prefix " (tmac)", \ + txpwr->rate, \ + ARRAY_SIZE(txpwr->rate), \ + idx); \ +}) + +static inline void +mt7925_eht_txpwr(struct seq_file *s, struct mt7925_txpwr *txpwr, u8 band_idx) +{ + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11", + "mcs12", "mcs13", "mcs14", "mcs15"); + mt7925_print_txpwr_entry(EHT26, eht26, band_idx); + mt7925_print_txpwr_entry(EHT52, eht52, band_idx); + mt7925_print_txpwr_entry(EHT106, eht106, band_idx); + mt7925_print_txpwr_entry(EHT242, eht242, band_idx); + mt7925_print_txpwr_entry(EHT484, eht484, band_idx); + + mt7925_print_txpwr_entry(EHT996, eht996, band_idx); + mt7925_print_txpwr_entry(EHT996x2, eht996x2, band_idx); + mt7925_print_txpwr_entry(EHT996x4, eht996x4, band_idx); + mt7925_print_txpwr_entry(EHT26_52, eht26_52, band_idx); + mt7925_print_txpwr_entry(EHT26_106, eht26_106, band_idx); + mt7925_print_txpwr_entry(EHT484_242, eht484_242, band_idx); + mt7925_print_txpwr_entry(EHT996_484, eht996_484, band_idx); + mt7925_print_txpwr_entry(EHT996_484_242, eht996_484_242, band_idx); + mt7925_print_txpwr_entry(EHT996x2_484, eht996x2_484, band_idx); + mt7925_print_txpwr_entry(EHT996x3, eht996x3, band_idx); + mt7925_print_txpwr_entry(EHT996x3_484, eht996x3_484, band_idx); +} + +static int +mt7925_txpwr(struct seq_file *s, void *data) +{ + struct mt792x_dev *dev = dev_get_drvdata(s->private); + struct mt7925_txpwr *txpwr = NULL; + u8 band_idx = dev->mphy.band_idx; + int ret = 0; + + txpwr = devm_kmalloc(dev->mt76.dev, sizeof(*txpwr), GFP_KERNEL); + + if (!txpwr) + return -ENOMEM; + + mt792x_mutex_acquire(dev); + ret = mt7925_get_txpwr_info(dev, band_idx, txpwr); + mt792x_mutex_release(dev); + + if (ret) + goto out; + + seq_printf(s, "%-22s %6s %6s %6s %6s\n", + " ", "1m", "2m", "5m", "11m"); + mt7925_print_txpwr_entry(CCK, cck, 
band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "6m", "9m", "12m", "18m", "24m", "36m", + "48m", "54m"); + mt7925_print_txpwr_entry(OFDM, ofdm, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7"); + mt7925_print_txpwr_entry(HT20, ht20, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs32"); + mt7925_print_txpwr_entry(HT40, ht40, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11"); + mt7925_print_txpwr_entry(VHT20, vht20, band_idx); + mt7925_print_txpwr_entry(VHT40, vht40, band_idx); + + mt7925_print_txpwr_entry(VHT80, vht80, band_idx); + mt7925_print_txpwr_entry(VHT160, vht160, band_idx); + + mt7925_print_txpwr_entry(HE26, he26, band_idx); + mt7925_print_txpwr_entry(HE52, he52, band_idx); + mt7925_print_txpwr_entry(HE106, he106, band_idx); + mt7925_print_txpwr_entry(HE242, he242, band_idx); + mt7925_print_txpwr_entry(HE484, he484, band_idx); + + mt7925_print_txpwr_entry(HE996, he996, band_idx); + mt7925_print_txpwr_entry(HE996x2, he996x2, band_idx); + + mt7925_eht_txpwr(s, txpwr, band_idx); + +out: + devm_kfree(dev->mt76.dev, txpwr); + return ret; +} + +static int +mt7925_pm_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; + + if (mt76_is_usb(&dev->mt76)) + return -EOPNOTSUPP; + + mutex_lock(&dev->mt76.mutex); + + if (val == pm->enable_user) + goto out; + + if (!pm->enable_user) { + pm->stats.last_wake_event = jiffies; + pm->stats.last_doze_event = jiffies; + } + /* make sure the chip is awake here and ps_work is scheduled + * just at end of the this routine. + */ + pm->enable = false; + mt76_connac_pm_wake(&dev->mphy, pm); + + pm->enable_user = val; + mt7925_set_runtime_pm(dev); + mt76_connac_power_save_sched(&dev->mphy, pm); +out: + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7925_pm_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->pm.enable_user; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7925_pm_get, mt7925_pm_set, "%lld\n"); + +static int +mt7925_deep_sleep_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR); + bool enable = !!val; + + if (mt76_is_usb(&dev->mt76)) + return -EOPNOTSUPP; + + mt792x_mutex_acquire(dev); + if (pm->ds_enable_user == enable) + goto out; + + pm->ds_enable_user = enable; + pm->ds_enable = enable && !monitor; + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); +out: + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_deep_sleep_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->pm.ds_enable_user; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7925_deep_sleep_get, + mt7925_deep_sleep_set, "%lld\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt792x_pm_idle_timeout_get, + mt792x_pm_idle_timeout_set, "%lld\n"); + +static int mt7925_chip_reset(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + int ret = 0; + + switch (val) { + case 1: + /* Reset wifisys directly. */ + mt792x_reset(&dev->mt76); + break; + default: + /* Collect the core dump before reset wifisys. 
*/ + mt792x_mutex_acquire(dev); + ret = mt7925_mcu_chip_config(dev, "assert"); + mt792x_mutex_release(dev); + break; + } + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7925_chip_reset, "%lld\n"); + +int mt7925_init_debugfs(struct mt792x_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval); + if (!dir) + return -ENOMEM; + + if (mt76_is_mmio(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", + dir, mt792x_queues_read); + else + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", + dir, mt76_queues_read); + + debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, + mt792x_queues_acq); + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, + mt7925_txpwr); + debugfs_create_file("tx_stats", 0400, dir, dev, &mt792x_tx_stats_fops); + debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); + debugfs_create_file("idle-timeout", 0600, dir, dev, + &fops_pm_idle_timeout); + debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset); + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, + mt792x_pm_stats); + debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c new file mode 100644 index 0000000000..8f9b7a2f37 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include +#include +#include "mt7925.h" +#include "mac.h" +#include "mcu.h" + +static void +mt7925_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *req) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + /* allow world regdom at the first boot only */ + if (!memcmp(req->alpha2, "00", 2) && + mdev->alpha2[0] && mdev->alpha2[1]) + return; + + /* do not need to update the same country twice */ + if (!memcmp(req->alpha2, mdev->alpha2, 2) && + dev->country_ie_env == req->country_ie_env) + return; + + memcpy(mdev->alpha2, req->alpha2, 2); + mdev->region = req->dfs_region; + dev->country_ie_env = req->country_ie_env; + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env); + mt7925_mcu_set_channel_domain(hw->priv); + mt7925_set_tx_sar_pwr(hw, NULL); + mt792x_mutex_release(dev); +} + +static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mt76_rates); i++) { + u16 rate = mt76_rates[i].hw_value; + u16 idx = MT792x_BASIC_RATES_TBL + i; + + rate = FIELD_PREP(MT_TX_RATE_MODE, rate >> 8) | + FIELD_PREP(MT_TX_RATE_IDX, rate & GENMASK(7, 0)); + mt7925_mac_set_fixed_rate_table(dev, idx, rate); + } +} + +int mt7925_mac_init(struct mt792x_dev *dev) +{ + int i; + + mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); + /* enable hardware de-agg */ + mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); + + for (i = 0; i < MT792x_WTBL_SIZE; i++) + mt7925_mac_wtbl_update(dev, i, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + for (i = 0; i < 2; i++) + mt792x_mac_init_band(dev, i); + + mt7925_mac_init_basic_rates(dev); + + memzero_explicit(&dev->mt76.alpha2, sizeof(dev->mt76.alpha2)); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mac_init); + +static int __mt7925_init_hardware(struct mt792x_dev *dev) +{ + int ret; + + ret = 
mt792x_mcu_init(dev); + if (ret) + goto out; + + mt76_eeprom_override(&dev->mphy); + + ret = mt7925_mcu_set_eeprom(dev); + if (ret) + goto out; + + ret = mt7925_mac_init(dev); + if (ret) + goto out; + +out: + return ret; +} + +static int mt7925_init_hardware(struct mt792x_dev *dev) +{ + int ret, i; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + for (i = 0; i < MT792x_MCU_INIT_RETRY_COUNT; i++) { + ret = __mt7925_init_hardware(dev); + if (!ret) + break; + + mt792x_init_reset(dev); + } + + if (i == MT792x_MCU_INIT_RETRY_COUNT) { + dev_err(dev->mt76.dev, "hardware init failed\n"); + return ret; + } + + return 0; +} + +static void mt7925_init_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + init_work); + int ret; + + ret = mt7925_init_hardware(dev); + if (ret) + return; + + mt76_set_stream_caps(&dev->mphy, true); + mt7925_set_stream_he_eht_caps(&dev->phy); + + ret = mt76_register_device(&dev->mt76, true, mt76_rates, + ARRAY_SIZE(mt76_rates)); + if (ret) { + dev_err(dev->mt76.dev, "register device failed\n"); + return; + } + + ret = mt7925_init_debugfs(dev); + if (ret) { + dev_err(dev->mt76.dev, "register debugfs failed\n"); + return; + } + + /* we support chip reset now */ + dev->hw_init_done = true; + + mt7925_mcu_set_deep_sleep(dev, dev->pm.ds_enable); +} + +int mt7925_register_device(struct mt792x_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int ret; + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + dev->mt76.tx_worker.fn = mt792x_tx_worker; + + INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work); + INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work); + spin_lock_init(&dev->pm.wake.lock); + mutex_init(&dev->pm.mutex); + init_waitqueue_head(&dev->pm.wait); + spin_lock_init(&dev->pm.txq_lock); + INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work); + INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work); + INIT_DELAYED_WORK(&dev->coredump.work, mt7925_coredump_work); +#if IS_ENABLED(CONFIG_IPV6) + INIT_WORK(&dev->ipv6_ns_work, mt7925_set_ipv6_ns_work); + skb_queue_head_init(&dev->ipv6_ns_list); +#endif + skb_queue_head_init(&dev->phy.scan_event_list); + skb_queue_head_init(&dev->coredump.msg_list); + + INIT_WORK(&dev->reset_work, mt7925_mac_reset_work); + INIT_WORK(&dev->init_work, mt7925_init_work); + + INIT_WORK(&dev->phy.roc_work, mt7925_roc_work); + timer_setup(&dev->phy.roc_timer, mt792x_roc_timer, 0); + init_waitqueue_head(&dev->phy.roc_wait); + + dev->pm.idle_timeout = MT792x_PM_TIMEOUT; + dev->pm.stats.last_wake_event = jiffies; + dev->pm.stats.last_doze_event = jiffies; + if (!mt76_is_usb(&dev->mt76)) { + dev->pm.enable_user = true; + dev->pm.enable = true; + dev->pm.ds_enable_user = true; + dev->pm.ds_enable = true; + } + + if (!mt76_is_mmio(&dev->mt76)) + hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; + + mt792x_init_acpi_sar(dev); + + ret = mt792x_init_wcid(dev); + if (ret) + return ret; + + ret = mt792x_init_wiphy(hw); + if (ret) + return ret; + + hw->wiphy->reg_notifier = mt7925_regd_notifier; + dev->mphy.sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; + dev->mphy.sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_1; + dev->mphy.sband_5g.sband.vht_cap.cap |= + 
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | + IEEE80211_VHT_CAP_SHORT_GI_160; + + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; + + queue_work(system_wq, &dev->init_work); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_register_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c new file mode 100644 index 0000000000..1b9fbd9a14 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -0,0 +1,1452 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include +#include +#include +#include "mt7925.h" +#include "../dma.h" +#include "mac.h" +#include "mcu.h" + +bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); + + return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, + 0, 5000); +} + +static void mt7925_mac_sta_poll(struct mt792x_dev *dev) +{ + static const u8 ac_to_tid[] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 4, + [IEEE80211_AC_VO] = 6 + }; + struct ieee80211_sta *sta; + struct mt792x_sta *msta; + u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; + LIST_HEAD(sta_poll_list); + struct rate_info *rate; + s8 rssi[4]; + int i; + + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + while (true) { + bool clear = false; + u32 addr, val; + u16 idx; + u8 bw; + + if (list_empty(&sta_poll_list)) + break; + msta = list_first_entry(&sta_poll_list, + struct mt792x_sta, wcid.poll_list); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + idx = msta->wcid.idx; + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u32 tx_last = msta->airtime_ac[i]; + u32 rx_last = msta->airtime_ac[i + 4]; + + msta->airtime_ac[i] = mt76_rr(dev, addr); + msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + + tx_time[i] = msta->airtime_ac[i] - tx_last; + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + + if ((tx_last | rx_last) & BIT(30)) + clear = true; + + addr += 8; + } + + if (clear) { + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + } + + if (!msta->wcid.sta) + continue; + + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u8 q = mt76_connac_lmac_mapping(i); + u32 tx_cur = tx_time[q]; + u32 rx_cur = rx_time[q]; + u8 tid = ac_to_tid[i]; + + if (!tx_cur && !rx_cur) + continue; + + ieee80211_sta_register_airtime(sta, tid, tx_cur, + rx_cur); + } + + /* We don't support reading GI info from txs packets. + * For accurate tx status reporting and AQL improvement, + * we need to make sure that flags match so polling GI + * from per-sta counters directly. 
+ */ + rate = &msta->wcid.rate; + + switch (rate->bw) { + case RATE_INFO_BW_160: + bw = IEEE80211_STA_RX_BW_160; + break; + case RATE_INFO_BW_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case RATE_INFO_BW_40: + bw = IEEE80211_STA_RX_BW_40; + break; + default: + bw = IEEE80211_STA_RX_BW_20; + break; + } + + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6); + val = mt76_rr(dev, addr); + if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) { + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5); + val = mt76_rr(dev, addr); + rate->eht_gi = FIELD_GET(GENMASK(25, 24), val); + } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { + u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw; + + rate->he_gi = (val & (0x3 << offs)) >> offs; + } else if (rate->flags & + (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { + if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + else + rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; + } + + /* get signal strength of resp frames (CTS/BA/ACK) */ + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34); + val = mt76_rr(dev, addr); + + rssi[0] = to_rssi(GENMASK(7, 0), val); + rssi[1] = to_rssi(GENMASK(15, 8), val); + rssi[2] = to_rssi(GENMASK(23, 16), val); + rssi[3] = to_rssi(GENMASK(31, 14), val); + + msta->ack_signal = + mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); + + ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + } +} + +void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev, + u8 tbl_idx, u16 rate_idx) +{ + u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx; + + mt76_wr(dev, MT_WTBL_ITDR0, rate_idx); + /* use wtbl spe idx */ + mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL); + mt76_wr(dev, MT_WTBL_ITCR, ctrl); +} + +/* The HW does not translate the mac header to 802.3 for mesh point */ +static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); + struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid; + __le32 *rxd = (__le32 *)skb->data; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + u16 frame_control; + + if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) != + MT_RXD3_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL); + hdr.frame_control = cpu_to_le16(frame_control); + hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL)); + hdr.duration_id = 0; + + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (frame_control & (IEEE80211_FCTL_TODS | + IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + ether_addr_copy(hdr.addr4, eth_hdr->h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || + 
eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11], + IEEE80211_HT_CTL_LEN); + if (ieee80211_is_data_qos(hdr.frame_control)) { + __le16 qos_ctrl; + + qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL)); + memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, + IEEE80211_QOS_CTL_LEN); + } + + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + return 0; +} + +static int +mt7925_mac_fill_rx_rate(struct mt792x_dev *dev, + struct mt76_rx_status *status, + struct ieee80211_supported_band *sband, + __le32 *rxv, u8 *mode) +{ + u32 v0, v2; + u8 stbc, gi, bw, dcm, nss; + int i, idx; + bool cck = false; + + v0 = le32_to_cpu(rxv[0]); + v2 = le32_to_cpu(rxv[2]); + + idx = FIELD_GET(MT_PRXV_TX_RATE, v0); + i = idx; + nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; + + stbc = FIELD_GET(MT_PRXV_HT_STBC, v2); + gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2); + *mode = FIELD_GET(MT_PRXV_TX_MODE, v2); + dcm = FIELD_GET(MT_PRXV_DCM, v2); + bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2); + + switch (*mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (i > 31) + return -EINVAL; + break; + case MT_PHY_TYPE_VHT: + status->nss = nss; + status->encoding = RX_ENC_VHT; + if (gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (i > 11) + return -EINVAL; + break; + case MT_PHY_TYPE_HE_MU: + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + status->nss = nss; + status->encoding = RX_ENC_HE; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_HE_GI_3_2) + status->he_gi = gi; + + status->he_dcm = dcm; + break; + case MT_PHY_TYPE_EHT_SU: + case MT_PHY_TYPE_EHT_TRIG: + case MT_PHY_TYPE_EHT_MU: + status->nss = nss; + status->encoding = RX_ENC_EHT; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_EHT_GI_3_2) + status->eht.gi = gi; + break; + default: + return -EINVAL; + } + status->rate_idx = i; + + switch (bw) { + case IEEE80211_STA_RX_BW_20: + break; + case IEEE80211_STA_RX_BW_40: + if (*mode & MT_PHY_TYPE_HE_EXT_SU && + (idx & MT_PRXV_TX_ER_SU_106T)) { + status->bw = RATE_INFO_BW_HE_RU; + status->he_ru = + NL80211_RATE_INFO_HE_RU_ALLOC_106; + } else { + status->bw = RATE_INFO_BW_40; + } + break; + case IEEE80211_STA_RX_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } + + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + if (*mode < MT_PHY_TYPE_HE_SU && gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + return 0; +} + +static int +mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) +{ + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + bool hdr_trans, unicast, insert_ccmp_hdr = false; + u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info; + u16 hdr_gap; + __le32 *rxv = NULL, *rxd = (__le32 *)skb->data; + struct mt76_phy *mphy = 
&dev->mt76.phy; + struct mt792x_phy *phy = &dev->phy; + struct ieee80211_supported_band *sband; + u32 csum_status = *(u32 *)skb->cb; + u32 rxd0 = le32_to_cpu(rxd[0]); + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + u32 rxd3 = le32_to_cpu(rxd[3]); + u32 rxd4 = le32_to_cpu(rxd[4]); + struct mt792x_sta *msta = NULL; + u8 mode = 0; /* , band_idx; */ + u16 seq_ctrl = 0; + __le16 fc = 0; + int idx; + + memset(status, 0, sizeof(*status)); + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) + return -EINVAL; + + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + + chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3); + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; + idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); + status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); + + if (status->wcid) { + msta = container_of(status->wcid, struct mt792x_sta, wcid); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + } + + mt792x_get_status_freq_info(status, chfreq); + + switch (status->band) { + case NL80211_BAND_5GHZ: + sband = &mphy->sband_5g.sband; + break; + case NL80211_BAND_6GHZ: + sband = &mphy->sband_6g.sband; + break; + default: + sband = &mphy->sband_2g.sband; + break; + } + + if (!sband->channels) + return -EINVAL; + + if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (rxd3 & MT_RXD3_NORMAL_FCS_ERR) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && + !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; + } + + remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); + + if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) + return -EINVAL; + + rxd += 8; + if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + /* TODO: need to map rxd address */ + fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0)); + seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2); + qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2); + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & RX_FLAG_DECRYPTED) { + switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { + case MT_CIPHER_AES_CCMP: + case MT_CIPHER_CCMP_CCX: + case MT_CIPHER_CCMP_256: + insert_ccmp_hdr = + FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); + fallthrough; + case MT_CIPHER_TKIP: + case MT_CIPHER_TKIP_NO_MIC: + case MT_CIPHER_GCMP: + case MT_CIPHER_GCMP_256: + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + break; + default: + break; + } + } + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= 
RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + /* RXD Group 3 - P-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { + u32 v3; + int ret; + + rxv = rxd; + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + + v3 = le32_to_cpu(rxv[3]); + + status->chains = mphy->antenna_mask; + status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3); + status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3); + status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3); + status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3); + + /* RXD Group 5 - C-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { + rxd += 24; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode); + if (ret < 0) + return ret; + } + + amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; + } + + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + int pad_start = 0; + + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { + pad_start = ieee80211_get_hdrlen_from_skb(skb); + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { + /* When header translation failure is indicated, + * the hardware will insert an extra 2-byte field + * containing the data length after the protocol + * type field. 
+ */ + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + else + pad_start = 0; + } + + if (pad_start) { + memmove(skb->data + 2, skb->data, pad_start); + skb_pull(skb, 2); + } + } + + if (!hdr_trans) { + struct ieee80211_hdr *hdr; + + if (insert_ccmp_hdr) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt76_insert_ccmp_hdr(skb, key_id); + } + + hdr = mt76_skb_get_hdr(skb); + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag |= RX_FLAG_8023; + } + + mt792x_mac_assoc_rssi(dev, skb); + + if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) + mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); + + if (!status->wcid || !ieee80211_is_data_qos(fc)) + return 0; + + status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc); + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); + status->qos_ctl = qos_ctl; + + return 0; +} + +static void +mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb, + struct mt76_wcid *wcid) +{ + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + u8 fc_type, fc_stype; + u16 ethertype; + bool wmm = false; + u32 val; + + if (wcid->sta) { + struct ieee80211_sta *sta; + + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + wmm = sta->wme; + } + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | + FIELD_PREP(MT_TXD1_TID, tid); + + ethertype = get_unaligned_be16(&skb->data[12]); + if (ethertype >= ETH_P_802_3_MIN) + val |= MT_TXD1_ETH_802_3; + + txwi[1] |= cpu_to_le32(val); + + fc_type = IEEE80211_FTYPE_DATA >> 2; + fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + + txwi[2] |= cpu_to_le32(val); +} + +static void +mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, + struct ieee80211_key_conf *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool multicast = is_multicast_ether_addr(hdr->addr1); + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + __le16 fc = hdr->frame_control; + u8 fc_type, fc_stype; + u32 val; + + if (ieee80211_is_action(fc) && + mgmt->u.action.category == WLAN_CATEGORY_BACK && + mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) + tid = MT_TX_ADDBA; + else if (ieee80211_is_mgmt(hdr->frame_control)) + tid = MT_TX_NORMAL; + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + FIELD_PREP(MT_TXD1_TID, tid); + + if (!ieee80211_is_data(fc) || multicast || + info->flags & IEEE80211_TX_CTL_USE_MINRATE) + val |= MT_TXD1_FIXED_RATE; + + if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD1_BIP; + txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + + txwi[1] |= cpu_to_le32(val); + + fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; + fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + + txwi[2] |= cpu_to_le32(val); + + txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast)); + if (ieee80211_is_beacon(fc)) + txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); + + if (info->flags 
& IEEE80211_TX_CTL_INJECTED) { + u16 seqno = le16_to_cpu(hdr->seq_ctrl); + + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = le16_to_cpu(bar->start_seq_num); + } + + val = MT_TXD3_SN_VALID | + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); + txwi[3] |= cpu_to_le32(val); + txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); + } +} + +void +mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, int pid, + enum mt76_txq_id qid, u32 changed) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0; + u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE; + bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + struct mt76_vif *mvif; + bool beacon = !!(changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)); + bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)); + + mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL; + if (mvif) { + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + band_idx = mvif->band_idx; + } + + if (inband_disc) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_ALTX0; + } else if (beacon) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_BCN0; + } else if (qid >= MT_TXQ_PSD) { + p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = MT_LMAC_ALTX0; + } else { + p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS + + mt76_connac_lmac_mapping(skb_get_queue_mapping(skb)); + + /* counting non-offloading skbs */ + wcid->stats.tx_bytes += skb->len; + wcid->stats.tx_packets++; + } + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | + FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | + FIELD_PREP(MT_TXD0_Q_IDX, q_idx); + txwi[0] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | + FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); + + if (band_idx) + val |= FIELD_PREP(MT_TXD1_TGID, band_idx); + + txwi[1] = cpu_to_le32(val); + txwi[2] = 0; + + val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15); + + if (key) + val |= MT_TXD3_PROTECT_FRAME; + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + val |= MT_TXD3_NO_ACK; + if (wcid->amsdu) + val |= MT_TXD3_HW_AMSDU; + + txwi[3] = cpu_to_le32(val); + txwi[4] = 0; + + val = FIELD_PREP(MT_TXD5_PID, pid); + if (pid >= MT_PACKET_ID_FIRST) { + val |= MT_TXD5_TX_STATUS_HOST; + txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); + } + + txwi[5] = cpu_to_le32(val); + + val = MT_TXD6_DIS_MAT | MT_TXD6_DAS | + FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + txwi[6] = cpu_to_le32(val); + txwi[7] = 0; + + if (is_8023) + mt7925_mac_write_txwi_8023(txwi, skb, wcid); + else + mt7925_mac_write_txwi_80211(dev, txwi, skb, key); + + if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + bool mcast = ieee80211_is_data(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1); + u8 idx = MT792x_BASIC_RATES_TBL; + + if (mvif) { + if (mcast && mvif->mcast_rates_idx) + idx = mvif->mcast_rates_idx; + else if (beacon && mvif->beacon_rates_idx) + idx = mvif->beacon_rates_idx; + else + idx = mvif->basic_rates_idx; + } + + txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx)); + txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + } +} +EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi); + 
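+/* Descriptive note (editorial, derived from the function below): start a
+ * tx block-ack (A-MPDU) session the first time a QoS data frame goes out
+ * to an HT/HE-capable station on a given TID; the VO queue (tid >= 6) is
+ * skipped, and test_and_set_bit() ensures the session is kicked off once.
+ */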
+static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +{ + struct mt792x_sta *msta; + u16 fc, tid; + u32 val; + + if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + return; + + tid = le32_get_bits(txwi[1], MT_TXD1_TID); + if (tid >= 6) /* skip VO queue */ + return; + + val = le32_to_cpu(txwi[2]); + fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | + FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) + return; + + msta = (struct mt792x_sta *)sta->drv_priv; + if (!test_and_set_bit(tid, &msta->wcid.ampdu_state)) + ieee80211_start_tx_ba_session(sta, tid, 0); +} + +static bool +mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid, + int pid, __le32 *txs_data) +{ + struct mt76_sta_stats *stats = &wcid->stats; + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_phy *mphy; + struct ieee80211_tx_info *info; + struct sk_buff_head list; + struct rate_info rate = {}; + struct sk_buff *skb; + bool cck = false; + u32 txrate, txs, mode, stbc; + + mt76_tx_status_lock(mdev, &list); + skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); + if (!skb) + goto out_no_skb; + + txs = le32_to_cpu(txs_data[0]); + + info = IEEE80211_SKB_CB(skb); + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = !!(info->flags & + IEEE80211_TX_STAT_ACK); + + info->status.rates[0].idx = -1; + + txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); + + rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate); + rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1; + stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC); + + if (stbc && rate.nss > 1) + rate.nss >>= 1; + + if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss)) + stats->tx_nss[rate.nss - 1]++; + if (rate.mcs < ARRAY_SIZE(stats->tx_mcs)) + stats->tx_mcs[rate.mcs]++; + + mode = FIELD_GET(MT_TX_RATE_MODE, txrate); + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + mphy = mt76_dev_phy(mdev, wcid->phy_idx); + + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ) + sband = &mphy->sband_6g.sband; + else + sband = &mphy->sband_2g.sband; + + rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck); + rate.legacy = sband->bitrates[rate.mcs].bitrate; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + if (rate.mcs > 31) + goto out; + + rate.flags = RATE_INFO_FLAGS_MCS; + if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) + rate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + if (rate.mcs > 9) + goto out; + + rate.flags = RATE_INFO_FLAGS_VHT_MCS; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + if (rate.mcs > 11) + goto out; + + rate.he_gi = wcid->rate.he_gi; + rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate); + rate.flags = RATE_INFO_FLAGS_HE_MCS; + break; + case MT_PHY_TYPE_EHT_SU: + case MT_PHY_TYPE_EHT_TRIG: + case MT_PHY_TYPE_EHT_MU: + if (rate.mcs > 13) + goto out; + + rate.eht_gi = wcid->rate.eht_gi; + rate.flags = RATE_INFO_FLAGS_EHT_MCS; + break; + default: + goto out; + } + + stats->tx_mode[mode]++; + + switch (FIELD_GET(MT_TXS0_BW, txs)) { + case IEEE80211_STA_RX_BW_160: + rate.bw = RATE_INFO_BW_160; + stats->tx_bw[3]++; + break; + case IEEE80211_STA_RX_BW_80: + rate.bw = RATE_INFO_BW_80; + stats->tx_bw[2]++; + break; + case 
IEEE80211_STA_RX_BW_40: + rate.bw = RATE_INFO_BW_40; + stats->tx_bw[1]++; + break; + default: + rate.bw = RATE_INFO_BW_20; + stats->tx_bw[0]++; + break; + } + wcid->rate = rate; + +out: + mt76_tx_status_skb_done(mdev, skb, &list); + +out_no_skb: + mt76_tx_status_unlock(mdev, &list); + + return !!skb; +} + +void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) +{ + struct mt792x_sta *msta = NULL; + struct mt76_wcid *wcid; + __le32 *txs_data = data; + u16 wcidx; + u8 pid; + + if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1) + return; + + wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); + pid = le32_get_bits(txs_data[3], MT_TXS3_PID); + + if (pid < MT_PACKET_ID_FIRST) + return; + + if (wcidx >= MT792x_WTBL_SIZE) + return; + + rcu_read_lock(); + + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + if (!wcid) + goto out; + + msta = container_of(wcid, struct mt792x_sta, wcid); + + mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data); + if (!wcid->sta) + goto out; + + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +out: + rcu_read_unlock(); +} + +void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, bool clear_status, + struct list_head *free_list) +{ + struct mt76_dev *mdev = &dev->mt76; + __le32 *txwi; + u16 wcid_idx; + + mt76_connac_txp_skb_unmap(mdev, t); + if (!t->skb) + goto out; + + txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); + if (sta) { + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + + if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7925_tx_check_aggr(sta, txwi); + + wcid_idx = wcid->idx; + } else { + wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + } + + __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); +out: + t->skb = NULL; + mt76_put_txwi(mdev, t); +} +EXPORT_SYMBOL_GPL(mt7925_txwi_free); + +static void +mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) +{ + __le32 *tx_free = (__le32 *)data, *cur_info; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + LIST_HEAD(free_list); + struct sk_buff *skb, *tmp; + void *end = data + len; + bool wake = false; + u16 total, count = 0; + + /* clean DMA queues and unmap buffers first */ + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + + if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4)) + return; + + total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); + for (cur_info = &tx_free[2]; count < total; cur_info++) { + u32 msdu, info; + u8 i; + + if (WARN_ON_ONCE((void *)cur_info >= end)) + return; + /* 1'b1: new wcid pair. + * 1'b0: msdu_id with the same 'wcid pair' as above. 
+ */ + info = le32_to_cpu(*cur_info); + if (info & MT_TXFREE_INFO_PAIR) { + struct mt792x_sta *msta; + u16 idx; + + idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); + wcid = rcu_dereference(dev->mt76.wcid[idx]); + sta = wcid_to_sta(wcid); + if (!sta) + continue; + + msta = container_of(wcid, struct mt792x_sta, wcid); + spin_lock_bh(&mdev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &mdev->sta_poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); + continue; + } + + if (info & MT_TXFREE_INFO_HEADER) { + if (wcid) { + wcid->stats.tx_retries += + FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1; + wcid->stats.tx_failed += + !!FIELD_GET(MT_TXFREE_INFO_STAT, info); + } + continue; + } + + for (i = 0; i < 2; i++) { + msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID; + if (msdu == MT_TXFREE_INFO_MSDU_ID) + continue; + + count++; + txwi = mt76_token_release(mdev, msdu, &wake); + if (!txwi) + continue; + + mt7925_txwi_free(dev, txwi, sta, 0, &free_list); + } + } + + mt7925_mac_sta_poll(dev); + + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); + + mt76_worker_schedule(&dev->mt76.tx_worker); + + list_for_each_entry_safe(skb, tmp, &free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } +} + +bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + __le32 *rxd = (__le32 *)data; + __le32 *end = (__le32 *)&rxd[len / 4]; + enum rx_pkt_type type; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + if (type != PKT_TYPE_NORMAL) { + u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); + + if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == + MT_RXD0_SW_PKT_TYPE_FRAME)) + return true; + } + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */ + mt7925_mac_tx_free(dev, data, len); /* mmio */ + return false; + case PKT_TYPE_TXS: + for (rxd += 4; rxd + 12 <= end; rxd += 12) + mt7925_mac_add_txs(dev, rxd); + return false; + default: + return true; + } +} +EXPORT_SYMBOL_GPL(mt7925_rx_check); + +void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb, u32 *info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + u16 flag; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG); + if (type != PKT_TYPE_NORMAL) { + u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); + + if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == + MT_RXD0_SW_PKT_TYPE_FRAME)) + type = PKT_TYPE_NORMAL; + } + + if (type == PKT_TYPE_RX_EVENT && flag == 0x1) + type = PKT_TYPE_NORMAL_MCU; + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */ + mt7925_mac_tx_free(dev, skb->data, skb->len); + napi_consume_skb(skb, 1); + break; + case PKT_TYPE_RX_EVENT: + mt7925_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_TXS: + for (rxd += 2; rxd + 8 <= end; rxd += 8) + mt7925_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_NORMAL_MCU: + case PKT_TYPE_NORMAL: + if (!mt7925_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + fallthrough; + default: + dev_kfree_skb(skb); + break; + } +} +EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb); + +static void +mt7925_vif_connect_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ 
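+	/* Descriptive note (editorial, based on the calls below): after a full
+	 * chip reset, re-program per-vif state in the firmware - force station
+	 * interfaces to reconnect, re-add the dev context, and for AP
+	 * interfaces restore BSS, STA and beacon-offload state.
+	 */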
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; + struct ieee80211_hw *hw = mt76_hw(dev); + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_disconnect(vif, true); + + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt7925_mcu_set_tx(dev, vif); + + if (vif->type == NL80211_IFTYPE_AP) { + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + true, NULL); + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true); + } +} + +/* system error recovery */ +void mt7925_mac_reset_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + reset_work); + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + int i, ret; + + dev_dbg(dev->mt76.dev, "chip reset\n"); + dev->hw_full_reset = true; + ieee80211_stop_queues(hw); + + cancel_delayed_work_sync(&dev->mphy.mac_work); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + for (i = 0; i < 10; i++) { + mutex_lock(&dev->mt76.mutex); + ret = mt792x_dev_reset(dev); + mutex_unlock(&dev->mt76.mutex); + + if (!ret) + break; + } + + if (i == 10) + dev_err(dev->mt76.dev, "chip reset failed\n"); + + if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(dev->mphy.hw, &info); + } + + dev->hw_full_reset = false; + pm->suspended = false; + ieee80211_wake_queues(hw); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_vif_connect_iter, NULL); + mt76_connac_power_save_sched(&dev->mt76.phy, pm); +} + +void mt7925_coredump_work(struct work_struct *work) +{ + struct mt792x_dev *dev; + char *dump, *data; + + dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev, + coredump.work.work); + + if (time_is_after_jiffies(dev->coredump.last_activity + + 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) { + queue_delayed_work(dev->mt76.wq, &dev->coredump.work, + MT76_CONNAC_COREDUMP_TIMEOUT); + return; + } + + dump = vzalloc(MT76_CONNAC_COREDUMP_SZ); + data = dump; + + while (true) { + struct sk_buff *skb; + + spin_lock_bh(&dev->mt76.lock); + skb = __skb_dequeue(&dev->coredump.msg_list); + spin_unlock_bh(&dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8); + if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } + + memcpy(data, skb->data, skb->len); + data += skb->len; + + dev_kfree_skb(skb); + } + + if (dump) + dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, + GFP_KERNEL); + + mt792x_reset(&dev->mt76); +} + +/* usb_sdio */ +static void +mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, + struct sk_buff *skb) +{ + __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); + + memset(txwi, 0, MT_SDIO_TXD_SIZE); + mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0); + skb_push(skb, MT_SDIO_TXD_SIZE); +} + +int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key 
= info->control.hw_key; + struct sk_buff *skb = tx_info->skb; + int err, pad, pktid; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + if (sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); + + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); + pad = round_up(skb->len, 4) - skb->len; + if (mt76_is_usb(mdev)) + pad += 4; + + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb); + +void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e) +{ + __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE); + unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + u16 idx; + + idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid = rcu_dereference(mdev->wcid[idx]); + sta = wcid_to_sta(wcid); + + if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7925_tx_check_aggr(sta, txwi); + + skb_pull(e->skb, headroom); + mt76_tx_complete_skb(mdev, e->wcid, e->skb); +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb); + +bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + + mt792x_mutex_acquire(dev); + mt7925_mac_sta_poll(dev); + mt792x_mutex_release(dev); + + return false; +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data); + +#if IS_ENABLED(CONFIG_IPV6) +void mt7925_set_ipv6_ns_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + ipv6_ns_work); + struct sk_buff *skb; + int ret = 0; + + do { + skb = skb_dequeue(&dev->ipv6_ns_list); + + if (!skb) + break; + + mt792x_mutex_acquire(dev); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(OFFLOAD), true); + mt792x_mutex_release(dev); + + } while (!ret); + + if (ret) + skb_queue_purge(&dev->ipv6_ns_list); +} +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.h b/drivers/net/wireless/mediatek/mt76/mt7925/mac.h new file mode 100644 index 0000000000..b10a993326 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. */ + +#ifndef __MT7925_MAC_H +#define __MT7925_MAC_H + +#include "../mt76_connac3_mac.h" + +#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7 +#define MT_WTBL_TXRX_RATE_G2_HE 24 +#define MT_WTBL_TXRX_RATE_G2 12 + +#define MT_WTBL_AC0_CTT_OFFSET 20 + +static inline u32 mt7925_mac_wtbl_lmac_addr(struct mt792x_dev *dev, u16 wcid, u8 dw) +{ + mt76_wr(dev, MT_WTBLON_TOP_WDUCR, + FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); + + return MT_WTBL_LMAC_OFFS(wcid, dw); +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c new file mode 100644 index 0000000000..aa918b9b04 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -0,0 +1,1454 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include +#include +#include +#include +#include +#include +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +static void +mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data, + enum nl80211_iftype iftype) +{ + struct ieee80211_sta_he_cap *he_cap = &data->he_cap; + struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *he_mcs = &he_cap->he_mcs_nss_supp; + int i, nss = hweight8(phy->mt76->antenna_mask); + u16 mcs_map = 0; + + for (i = 0; i < 8; i++) { + if (i < nss) + mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2)); + else + mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2)); + } + + he_cap->has_he = true; + + he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; + he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; + he_cap_elem->mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + else + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + he_cap_elem->phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + he_cap_elem->phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + + switch (iftype) { + case NL80211_IFTYPE_AP: + he_cap_elem->mac_cap_info[2] |= + IEEE80211_HE_MAC_CAP2_BSR; + he_cap_elem->mac_cap_info[4] |= + IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->mac_cap_info[5] |= + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_STATION: + he_cap_elem->mac_cap_info[1] |= + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G; + else + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; + + he_cap_elem->phy_cap_info[1] |= + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[4] |= + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + he_cap_elem->phy_cap_info[5] |= + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[7] |= + 
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; + he_cap_elem->phy_cap_info[8] |= + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + break; + default: + break; + } + + he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + + if (he_cap_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss); + } else { + he_cap_elem->phy_cap_info[9] |= + u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + } + + if (band == NL80211_BAND_6GHZ) { + u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | + IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + + cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_0_5, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) | + u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) | + u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454, + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); + + data->he_6ghz_capa.capa = cpu_to_le16(cap); + } +} + +static void +mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data, + enum nl80211_iftype iftype) +{ + struct ieee80211_sta_eht_cap *eht_cap = &data->eht_cap; + struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp *eht_nss = &eht_cap->eht_mcs_nss_supp; + enum nl80211_chan_width width = phy->mt76->chandef.width; + int nss = hweight8(phy->mt76->antenna_mask); + int sts = hweight16(phy->mt76->chainmask); + u8 val; + + if (!phy->dev->has_eht) + return; + + eht_cap->has_eht = true; + + eht_cap_elem->mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL; + + eht_cap_elem->phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; + + eht_cap_elem->phy_cap_info[0] |= + u8_encode_bits(u8_get_bits(sts - 1, BIT(0)), + IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); + + eht_cap_elem->phy_cap_info[1] = + u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)), + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | + u8_encode_bits(sts - 1, + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK); + + eht_cap_elem->phy_cap_info[2] = + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) | + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK); + + eht_cap_elem->phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + 
IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + + eht_cap_elem->phy_cap_info[4] = + u8_encode_bits(min_t(int, sts - 1, 2), + IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK); + + eht_cap_elem->phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US, + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) | + u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)), + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK); + + val = width == NL80211_CHAN_WIDTH_160 ? 0x7 : + width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1; + eht_cap_elem->phy_cap_info[6] = + u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)), + IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) | + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK); + + eht_cap_elem->phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ; + + val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) | + u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX); + + eht_nss->bw._80.rx_tx_mcs9_max_nss = val; + eht_nss->bw._80.rx_tx_mcs11_max_nss = val; + eht_nss->bw._80.rx_tx_mcs13_max_nss = val; + eht_nss->bw._160.rx_tx_mcs9_max_nss = val; + eht_nss->bw._160.rx_tx_mcs11_max_nss = val; + eht_nss->bw._160.rx_tx_mcs13_max_nss = val; +} + +static void +__mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy, + struct ieee80211_supported_band *sband, + enum nl80211_band band) +{ + struct ieee80211_sband_iftype_data *data = phy->iftype[band]; + int i, n = 0; + + for (i = 0; i < NUM_NL80211_IFTYPES; i++) { + switch (i) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + break; + default: + continue; + } + + data[n].types_mask = BIT(i); + mt7925_init_he_caps(phy, band, &data[n], i); + mt7925_init_eht_caps(phy, band, &data[n], i); + + n++; + } + + _ieee80211_set_sband_iftype_data(sband, data, n); +} + +void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy) +{ + if (phy->mt76->cap.has_2ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_2g.sband, + NL80211_BAND_2GHZ); + + if (phy->mt76->cap.has_5ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_5g.sband, + NL80211_BAND_5GHZ); + + if (phy->mt76->cap.has_6ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_6g.sband, + NL80211_BAND_6GHZ); +} + +int __mt7925_start(struct mt792x_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + int err; + + err = mt7925_mcu_set_channel_domain(mphy); + if (err) + return err; + + err = mt7925_mcu_set_rts_thresh(phy, 0x92b); + if (err) + return err; + + err = mt7925_set_tx_sar_pwr(mphy->hw, NULL); + if (err) + return err; + + mt792x_mac_reset_counters(phy); + set_bit(MT76_STATE_RUNNING, &mphy->state); + + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + MT792x_WATCHDOG_TIME); + + return 0; +} +EXPORT_SYMBOL_GPL(__mt7925_start); + +static int mt7925_start(struct ieee80211_hw *hw) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int err; + + mt792x_mutex_acquire(phy->dev); + err = __mt7925_start(phy); + mt792x_mutex_release(phy->dev); + + return err; +} + +static int +mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt76_txq *mtxq; + int idx, ret = 0; + + mt792x_mutex_acquire(dev); + + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if 
(mvif->mt76.idx >= MT792x_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + mvif->mt76.omac_idx = mvif->mt76.idx; + mvif->phy = phy; + mvif->mt76.band_idx = 0; + mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + + if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) + mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; + else + mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; + + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, + true); + if (ret) + goto out; + + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); + phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + + idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; + + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mvif->sta.wcid); + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + ewma_rssi_init(&mvif->rssi); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = idx; + } + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; +out: + mt792x_mutex_release(dev); + + return ret; +} + +static void mt7925_roc_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = priv; + + mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id); +} + +void mt7925_roc_work(struct work_struct *work) +{ + struct mt792x_phy *phy; + + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, + roc_work); + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return; + + mt792x_mutex_acquire(phy->dev); + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_roc_iter, phy); + mt792x_mutex_release(phy->dev); + ieee80211_remain_on_channel_expired(phy->mt76->hw); +} + +static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) +{ + int err = 0; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + + mt792x_mutex_acquire(phy->dev); + if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + mt792x_mutex_release(phy->dev); + + return err; +} + +static int mt7925_set_roc(struct mt792x_phy *phy, + struct mt792x_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum mt7925_roc_req type) +{ + int err; + + if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) + return -EBUSY; + + phy->roc_grant = false; + + err = mt7925_mcu_set_roc(phy, vif, chan, duration, type, + ++phy->roc_token_id); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + goto out; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { + mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + err = -ETIMEDOUT; + } + +out: + return err; +} + +static int mt7925_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int err; + + mt792x_mutex_acquire(phy->dev); + err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC); + mt792x_mutex_release(phy->dev); + + return err; +} + +static int mt7925_cancel_remain_on_channel(struct ieee80211_hw 
*hw, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + return mt7925_abort_roc(phy, mvif); +} + +static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + u8 *wcid_keyidx = &wcid->hw_key_idx; + int idx = key->keyidx, err = 0; + + /* The hardware does not support per-STA RX GTK, fallback + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + wcid_keyidx = &wcid->hw_key_idx2; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + if (!mvif->wep_sta) + return -EOPNOTSUPP; + break; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_SMS4: + break; + default: + return -EOPNOTSUPP; + } + + mt792x_mutex_acquire(dev); + + if (cmd == SET_KEY && !mvif->mt76.cipher) { + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher); + mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true); + } + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + + mt76_wcid_key_setup(&dev->mt76, wcid, + cmd == SET_KEY ? 
key : NULL); + + err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip, + key, MCU_UNI_CMD(STA_REC_UPDATE), + &msta->wcid, cmd); + + if (err) + goto out; + + if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || + key->cipher == WLAN_CIPHER_SUITE_WEP40) + err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip, + key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), + &mvif->wep_sta->wcid, cmd); + +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = priv; + struct ieee80211_hw *hw = mt76_hw(dev); + bool pm_enable = dev->pm.enable; + int err; + + err = mt7925_mcu_set_beacon_filter(dev, vif, pm_enable); + if (err < 0) + return; + + if (pm_enable) { + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + ieee80211_hw_set(hw, CONNECTION_MONITOR); + } else { + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags); + } +} + +static void +mt7925_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = priv; + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); + + mt7925_mcu_set_sniffer(dev, vif, monitor); + pm->enable = pm->enable_user && !monitor; + pm->ds_enable = pm->ds_enable_user && !monitor; + + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); + + if (monitor) + mt7925_mcu_set_beacon_filter(dev, vif, false); +} + +void mt7925_set_runtime_pm(struct mt792x_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); + + pm->enable = pm->enable_user && !monitor; + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_pm_interface_iter, dev); + pm->ds_enable = pm->ds_enable_user && !monitor; + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); +} + +static int mt7925_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int ret = 0; + + mt792x_mutex_acquire(dev); + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + ret = mt7925_set_tx_sar_pwr(hw, NULL); + if (ret) + goto out; + } + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_sniffer_interface_iter, dev); + } + +out: + mt792x_mutex_release(dev); + + return ret; +} + +static void mt7925_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ +#define MT7925_FILTER_FCSFAIL BIT(2) +#define MT7925_FILTER_CONTROL BIT(5) +#define MT7925_FILTER_OTHER_BSS BIT(6) +#define MT7925_FILTER_ENABLE BIT(31) + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u32 flags = MT7925_FILTER_ENABLE; + +#define MT7925_FILTER(_fif, _type) do { \ + if (*total_flags & (_fif)) \ + flags |= MT7925_FILTER_##_type; \ + } while (0) + + MT7925_FILTER(FIF_FCSFAIL, FCSFAIL); + MT7925_FILTER(FIF_CONTROL, CONTROL); + MT7925_FILTER(FIF_OTHER_BSS, OTHER_BSS); + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_rxfilter(dev, flags, 0, 0); + mt792x_mutex_release(dev); + + *total_flags &= (FIF_OTHER_BSS | FIF_FCSFAIL | FIF_CONTROL); +} + +static u8 +mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + bool beacon, bool mcast) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_phy *mphy = hw->priv; + u16 rate; + u8 i, idx, ht; + + rate = 
mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast); + ht = FIELD_GET(MT_TX_RATE_MODE, rate) > MT_PHY_TYPE_OFDM; + + if (beacon && ht) { + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + /* must odd index */ + idx = MT7925_BEACON_RATES_TBL + 2 * (mvif->idx % 20); + mt7925_mac_set_fixed_rate_table(dev, idx, rate); + return idx; + } + + idx = FIELD_GET(MT_TX_RATE_IDX, rate); + for (i = 0; i < ARRAY_SIZE(mt76_rates); i++) + if ((mt76_rates[i].hw_value & GENMASK(7, 0)) == idx) + return MT792x_BASIC_RATES_TBL + i; + + return mvif->basic_rates_idx; +} + +static void mt7925_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt792x_mac_set_timeing(phy); + } + } + + if (changed & BSS_CHANGED_MCAST_RATE) + mvif->mcast_rates_idx = + mt7925_get_rates_table(hw, vif, false, true); + + if (changed & BSS_CHANGED_BASIC_RATES) + mvif->basic_rates_idx = + mt7925_get_rates_table(hw, vif, false, false); + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) { + mvif->beacon_rates_idx = + mt7925_get_rates_table(hw, vif, true, false); + + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, + info->enable_beacon); + } + + /* ensure that enable txcmd_mode after bss_info */ + if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) + mt7925_mcu_set_tx(dev, vif); + + if (changed & BSS_CHANGED_PS) + mt7925_mcu_uni_bss_ps(dev, vif); + + if (changed & BSS_CHANGED_ASSOC) { + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_ASSOC); + mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info); + } + + mt792x_mutex_release(dev); +} + +int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + int ret, idx; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; + + INIT_LIST_HEAD(&msta->wcid.poll_list); + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.phy_idx = mvif->mt76.band_idx; + msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->last_txs = jiffies; + + ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (ret) + return ret; + + if (vif->type == NL80211_IFTYPE_STATION) + mvif->wep_sta = msta; + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + /* should update bss info before STA add */ + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + false); + + ret = mt7925_mcu_sta_update(dev, sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_add); + +void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = 
container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt792x_mutex_acquire(dev); + + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + true); + + ewma_avg_signal_init(&msta->avg_ack_signal); + + mt7925_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + + mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); + + mt792x_mutex_release(dev); +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc); + +void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_pm_wake(&dev->mphy, &dev->pm); + + mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); + mt7925_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + if (vif->type == NL80211_IFTYPE_STATION) { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mvif->wep_sta = NULL; + ewma_rssi_init(&mvif->rssi); + if (!sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + false); + } + + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove); + +static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_rts_thresh(&dev->phy, val); + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + u16 tid = params->tid; + u16 ssn = params->ssn; + struct mt76_txq *mtxq; + int ret = 0; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + mt792x_mutex_acquire(dev); + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + params->buf_size); + mt7925_mcu_uni_rx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt7925_mcu_uni_rx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + mt7925_mcu_uni_tx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta->wcid.ampdu_state); + mt7925_mcu_uni_tx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_START: + set_bit(tid, &msta->wcid.ampdu_state); + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta->wcid.ampdu_state); + mt7925_mcu_uni_tx_ba(dev, params, false); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + 
mt792x_mutex_release(dev); + + return ret; +} + +static bool is_valid_alpha2(const char *alpha2) +{ + if (!alpha2) + return false; + + if (alpha2[0] == '0' && alpha2[1] == '0') + return true; + + if (isalpha(alpha2[0]) && isalpha(alpha2[1])) + return true; + + return false; +} + +void mt7925_scan_work(struct work_struct *work) +{ + struct mt792x_phy *phy; + + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, + scan_work.work); + + while (true) { + struct mt76_dev *mdev = &phy->dev->mt76; + struct sk_buff *skb; + struct tlv *tlv; + int tlv_len; + + spin_lock_bh(&phy->dev->mt76.lock); + skb = __skb_dequeue(&phy->scan_event_list); + spin_unlock_bh(&phy->dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + tlv = (struct tlv *)skb->data; + tlv_len = skb->len; + + while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) { + struct mt7925_mcu_scan_chinfo_event *evt; + + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_SCAN_DONE_BASIC: + if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + ieee80211_scan_completed(phy->mt76->hw, &info); + } + break; + case UNI_EVENT_SCAN_DONE_CHNLINFO: + evt = (struct mt7925_mcu_scan_chinfo_event *)tlv->data; + + if (!is_valid_alpha2(evt->alpha2)) + break; + + if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0') + break; + + mt7925_mcu_set_clc(phy->dev, evt->alpha2, ENVIRON_INDOOR); + + break; + case UNI_EVENT_SCAN_DONE_NLO: + ieee80211_sched_scan_results(phy->mt76->hw); + break; + default: + break; + } + + tlv_len -= le16_to_cpu(tlv->len); + tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len)); + } + + dev_kfree_skb(skb); + } +} + +static int +mt7925_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_hw_scan(mphy, vif, req); + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + + mt792x_mutex_acquire(dev); + mt7925_mcu_cancel_hw_scan(mphy, vif); + mt792x_mutex_release(dev); +} + +static int +mt7925_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_sched_scan_req(mphy, vif, req); + if (err < 0) + goto out; + + err = mt7925_mcu_sched_scan_enable(mphy, vif, true); +out: + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_sched_scan_enable(mphy, vif, false); + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int max_nss = hweight8(hw->wiphy->available_antennas_tx); + + if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) + return -EINVAL; + + if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) + tx_ant = BIT(ffs(tx_ant) - 1) - 1; + + 
mt792x_mutex_acquire(dev); + + phy->mt76->antenna_mask = tx_ant; + phy->mt76->chainmask = tx_ant; + + mt76_set_stream_caps(phy->mt76, true); + mt7925_set_stream_he_eht_caps(phy); + + /* TODO: update bmc_wtbl spe_idx when antenna changes */ + mt792x_mutex_release(dev); + + return 0; +} + +#ifdef CONFIG_PM +static int mt7925_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mt76->mac_work); + + cancel_delayed_work_sync(&dev->pm.ps_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt792x_mutex_acquire(dev); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_set_suspend_iter, + &dev->mphy); + + mt792x_mutex_release(dev); + + return 0; +} + +static int mt7925_resume(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mt792x_mutex_acquire(dev); + + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_set_suspend_iter, + &dev->mphy); + + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + MT792x_WATCHDOG_TIME); + + mt792x_mutex_release(dev); + + return 0; +} + +static void mt7925_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt76_connac_mcu_update_gtk_rekey(hw, vif, data); + mt792x_mutex_release(dev); +} +#endif /* CONFIG_PM */ + +static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta); + + mt792x_mutex_release(dev); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; + struct inet6_ifaddr *ifa; + struct sk_buff *skb; + u8 idx = 0; + + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_arpns_tlv arpns; + struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + } req_hdr = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arpns = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), + .len = cpu_to_le16(sizeof(req_hdr) - 4), + .enable = true, + }, + }; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + if (ifa->flags & IFA_F_TENTATIVE) + continue; + req_hdr.ns_addrs[idx] = ifa->addr; + if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN) + break; + } + read_unlock_bh(&idev->lock); + + if (!idx) + return; + + req_hdr.arpns.ips_num = idx; + + skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr), + 0, GFP_ATOMIC); + if (!skb) + return; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + + skb_queue_tail(&dev->ipv6_ns_list, skb); + + ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work); +} +#endif + +int 
mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt76_phy *mphy = hw->priv; + + if (sar) { + int err = mt76_init_sar_power(hw, sar); + + if (err) + return err; + } + mt792x_init_acpi_sar_power(mt792x_hw_phy(hw), !sar); + + return mt7925_mcu_set_rate_txpower(mphy); +} + +static int mt7925_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_set_clc(dev, dev->mt76.alpha2, + dev->country_ie_env); + if (err < 0) + goto out; + + err = mt7925_set_tx_sar_pwr(hw, sar); +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true); + mt792x_mutex_release(dev); +} + +static int +mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, + true); + if (err) + goto out; + + err = mt7925_mcu_set_bss_pm(dev, vif, true); + if (err) + goto out; + + err = mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_set_bss_pm(dev, vif, false); + if (err) + goto out; + + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, + false); + +out: + mt792x_mutex_release(dev); +} + +static int +mt7925_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + return 0; +} + +static void +mt7925_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ +} + +static void mt7925_ctx_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_chanctx_conf *ctx = priv; + + if (ctx != mvif->mt76.ctx) + return; + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true); + mt7925_mcu_config_sniffer(mvif, ctx); + } else { + mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); + } +} + +static void +mt7925_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mt792x_mutex_acquire(phy->dev); + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_ACTIVE, + mt7925_ctx_iter, ctx); + mt792x_mutex_release(phy->dev); +} + +static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u16 duration = info->duration ? 
info->duration : + jiffies_to_msecs(HZ); + + mt792x_mutex_acquire(dev); + mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + MT7925_ROC_REQ_JOIN); + mt792x_mutex_release(dev); +} + +static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt7925_abort_roc(mvif->phy, mvif); +} + +const struct ieee80211_ops mt7925_ops = { + .tx = mt792x_tx, + .start = mt7925_start, + .stop = mt792x_stop, + .add_interface = mt7925_add_interface, + .remove_interface = mt792x_remove_interface, + .config = mt7925_config, + .conf_tx = mt792x_conf_tx, + .configure_filter = mt7925_configure_filter, + .bss_info_changed = mt7925_bss_info_changed, + .start_ap = mt7925_start_ap, + .stop_ap = mt7925_stop_ap, + .sta_state = mt76_sta_state, + .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, + .set_key = mt7925_set_key, + .sta_set_decap_offload = mt7925_sta_set_decap_offload, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = mt7925_ipv6_addr_change, +#endif /* CONFIG_IPV6 */ + .ampdu_action = mt7925_ampdu_action, + .set_rts_threshold = mt7925_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, + .release_buffered_frames = mt76_release_buffered_frames, + .channel_switch_beacon = mt7925_channel_switch_beacon, + .get_txpower = mt76_get_txpower, + .get_stats = mt792x_get_stats, + .get_et_sset_count = mt792x_get_et_sset_count, + .get_et_strings = mt792x_get_et_strings, + .get_et_stats = mt792x_get_et_stats, + .get_tsf = mt792x_get_tsf, + .set_tsf = mt792x_set_tsf, + .get_survey = mt76_get_survey, + .get_antenna = mt76_get_antenna, + .set_antenna = mt7925_set_antenna, + .set_coverage_class = mt792x_set_coverage_class, + .hw_scan = mt7925_hw_scan, + .cancel_hw_scan = mt7925_cancel_hw_scan, + .sta_statistics = mt792x_sta_statistics, + .sched_scan_start = mt7925_start_sched_scan, + .sched_scan_stop = mt7925_stop_sched_scan, +#ifdef CONFIG_PM + .suspend = mt7925_suspend, + .resume = mt7925_resume, + .set_wakeup = mt792x_set_wakeup, + .set_rekey_data = mt7925_set_rekey_data, +#endif /* CONFIG_PM */ + .flush = mt792x_flush, + .set_sar_specs = mt7925_set_sar_specs, + .remain_on_channel = mt7925_remain_on_channel, + .cancel_remain_on_channel = mt7925_cancel_remain_on_channel, + .add_chanctx = mt7925_add_chanctx, + .remove_chanctx = mt7925_remove_chanctx, + .change_chanctx = mt7925_change_chanctx, + .assign_vif_chanctx = mt792x_assign_vif_chanctx, + .unassign_vif_chanctx = mt792x_unassign_vif_chanctx, + .mgd_prepare_tx = mt7925_mgd_prepare_tx, + .mgd_complete_tx = mt7925_mgd_complete_tx, +}; +EXPORT_SYMBOL_GPL(mt7925_ops); + +MODULE_AUTHOR("Deren Wu "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c new file mode 100644 index 0000000000..9c0e397537 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -0,0 +1,3174 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include +#include +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +#define MT_STA_BFER BIT(0) +#define MT_STA_BFEE BIT(1) + +static bool mt7925_disable_clc; +module_param_named(disable_clc, mt7925_disable_clc, bool, 0644); +MODULE_PARM_DESC(disable_clc, "disable CLC support"); + +int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) +{ + int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); + struct mt7925_mcu_rxd *rxd; + int ret = 0; + + if (!skb) { + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); + mt792x_reset(mdev); + + return -ETIMEDOUT; + } + + rxd = (struct mt7925_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -EAGAIN; + + if (cmd == MCU_CMD(PATCH_SEM_CONTROL) || + cmd == MCU_CMD(PATCH_FINISH_REQ)) { + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || + cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || + cmd == MCU_UNI_CMD(STA_REC_UPDATE) || + cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(OFFLOAD) || + cmd == MCU_UNI_CMD(SUSPEND)) { + struct mt7925_mcu_uni_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7925_mcu_uni_event *)skb->data; + ret = le32_to_cpu(event->status); + /* skip invalid event */ + if (mcu_cmd != event->cid) + ret = -EAGAIN; + } else { + skb_pull(skb, sizeof(*rxd)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_parse_response); + +int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set) +{ +#define MT_RF_REG_HDR GENMASK(31, 24) +#define MT_RF_REG_ANT GENMASK(23, 16) +#define RF_REG_PREFIX 0x99 + struct { + u8 __rsv[4]; + union { + struct uni_cmd_access_reg_basic { + __le16 tag; + __le16 len; + __le32 idx; + __le32 data; + } __packed reg; + struct uni_cmd_access_rf_reg_basic { + __le16 tag; + __le16 len; + __le16 ant; + u8 __rsv[2]; + __le32 idx; + __le32 data; + } __packed rf_reg; + }; + } __packed * res, req; + struct sk_buff *skb; + int ret; + + if (u32_get_bits(regidx, MT_RF_REG_HDR) == RF_REG_PREFIX) { + req.rf_reg.tag = cpu_to_le16(UNI_CMD_ACCESS_RF_REG_BASIC); + req.rf_reg.len = cpu_to_le16(sizeof(req.rf_reg)); + req.rf_reg.ant = cpu_to_le16(u32_get_bits(regidx, MT_RF_REG_ANT)); + req.rf_reg.idx = cpu_to_le32(regidx); + req.rf_reg.data = set ? cpu_to_le32(*val) : 0; + } else { + req.reg.tag = cpu_to_le16(UNI_CMD_ACCESS_REG_BASIC); + req.reg.len = cpu_to_le16(sizeof(req.reg)); + req.reg.idx = cpu_to_le32(regidx); + req.reg.data = set ? 
cpu_to_le32(*val) : 0; + } + + if (set) + return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(REG_ACCESS), + &req, sizeof(req), true); + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, + MCU_WM_UNI_CMD_QUERY(REG_ACCESS), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + res = (void *)skb->data; + if (u32_get_bits(regidx, MT_RF_REG_HDR) == RF_REG_PREFIX) + *val = le32_to_cpu(res->rf_reg.data); + else + *val = le32_to_cpu(res->reg.data); + + dev_kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_regval); + +int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif, + bss_conf); + struct sk_buff *skb; + int i, len = min_t(int, mvif->cfg.arp_addr_cnt, + IEEE80211_BSS_ARP_ADDR_LIST_LEN); + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_arpns_tlv arp; + } req = { + .hdr = { + .bss_idx = vif->idx, + }, + .arp = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(req) - 4 + len * 2 * sizeof(__be32)), + .ips_num = len, + .enable = true, + }, + }; + + skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(req) + len * 2 * sizeof(__be32)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req, sizeof(req)); + for (i = 0; i < len; i++) { + skb_put_data(skb, &mvif->cfg.arp_addr_list[i], sizeof(__be32)); + skb_put_zero(skb, sizeof(__be32)); + } + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true); +} + +#ifdef CONFIG_PM +static int +mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_wow_ctrl_tlv wow_ctrl_tlv; + struct mt76_connac_wow_gpio_param_tlv gpio_tlv; + } req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .wow_ctrl_tlv = { + .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL), + .len = cpu_to_le16(sizeof(struct mt76_connac_wow_ctrl_tlv)), + .cmd = suspend ? 
1 : 2, + }, + .gpio_tlv = { + .tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM), + .len = cpu_to_le16(sizeof(struct mt76_connac_wow_gpio_param_tlv)), + .gpio_pin = 0xff, /* follow fw about GPIO pin */ + }, + }; + + if (wowlan->magic_pkt) + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC; + if (wowlan->disconnect) + req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT | + UNI_WOW_DETECT_TYPE_BCN_LOST); + if (wowlan->nd_config) { + mt7925_mcu_sched_scan_req(phy, vif, wowlan->nd_config); + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT; + mt7925_mcu_sched_scan_enable(phy, vif, suspend); + } + if (wowlan->n_patterns) + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP; + + if (mt76_is_mmio(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE; + else if (mt76_is_usb(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_USB; + else if (mt76_is_sdio(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO; + + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, + sizeof(req), true); +} + +static int +mt7925_mcu_set_wow_pattern(struct mt76_dev *dev, + struct ieee80211_vif *vif, + u8 index, bool enable, + struct cfg80211_pkt_pattern *pattern) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_wow_pattern_tlv *tlv; + struct sk_buff *skb; + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr = { + .bss_idx = mvif->idx, + }; + + skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(hdr) + sizeof(*tlv)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &hdr, sizeof(hdr)); + tlv = (struct mt7925_wow_pattern_tlv *)skb_put(skb, sizeof(*tlv)); + tlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN); + tlv->len = cpu_to_le16(sizeof(*tlv)); + tlv->bss_idx = 0xF; + tlv->data_len = pattern->pattern_len; + tlv->enable = enable; + tlv->index = index; + tlv->offset = 0; + + memcpy(tlv->pattern, pattern->pattern, pattern->pattern_len); + memcpy(tlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8)); + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true); +} + +void mt7925_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt76_phy *phy = priv; + bool suspend = !test_bit(MT76_STATE_RUNNING, &phy->state); + struct ieee80211_hw *hw = phy->hw; + struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config; + int i; + + mt76_connac_mcu_set_gtk_rekey(phy->dev, vif, suspend); + + mt76_connac_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); + + for (i = 0; i < wowlan->n_patterns; i++) + mt7925_mcu_set_wow_pattern(phy->dev, vif, i, suspend, + &wowlan->patterns[i]); + mt7925_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan); +} + +#endif /* CONFIG_PM */ + +static void +mt7925_mcu_connection_loss_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_uni_beacon_loss_event *event = priv; + + if (mvif->idx != event->hdr.bss_idx) + return; + + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) || + vif->type != NL80211_IFTYPE_STATION) + return; + + ieee80211_connection_loss(vif); +} + +static void +mt7925_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_uni_beacon_loss_event *event; + struct mt76_phy *mphy = &dev->mt76.phy; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd)); + event = (struct mt7925_uni_beacon_loss_event *)skb->data; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_connection_loss_iter, event); +} + +static void +mt7925_mcu_roc_iter(void *priv, u8 *mac, 
struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_roc_grant_tlv *grant = priv; + + if (mvif->idx != grant->bss_idx) + return; + + mvif->band_idx = grant->dbdcband; +} + +static void +mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + struct mt7925_roc_grant_tlv *grant; + struct mt7925_mcu_rxd *rxd; + int duration; + + rxd = (struct mt7925_mcu_rxd *)skb->data; + grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4); + + /* should never happen */ + WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT)); + + if (grant->reqtype == MT7925_ROC_REQ_ROC) + ieee80211_ready_on_channel(hw); + else if (grant->reqtype == MT7925_ROC_REQ_JOIN) + ieee80211_iterate_active_interfaces_atomic(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_roc_iter, grant); + dev->phy.roc_grant = true; + wake_up(&dev->phy.roc_wait); + duration = le32_to_cpu(grant->max_interval); + mod_timer(&dev->phy.roc_timer, + jiffies + msecs_to_jiffies(duration)); +} + +static void +mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv; + + spin_lock_bh(&dev->mt76.lock); + __skb_queue_tail(&phy->scan_event_list, skb); + spin_unlock_bh(&dev->mt76.lock); + + ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, + MT792x_HW_SCAN_TIMEOUT); +} + +static void +mt7925_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ +#define UNI_EVENT_TX_DONE_MSG 0 +#define UNI_EVENT_TX_DONE_RAW 1 + struct mt7925_mcu_txs_event { + u8 ver; + u8 rsv[3]; + u8 data[0]; + } __packed * txs; + struct tlv *tlv; + u32 tlv_len; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + tlv = (struct tlv *)skb->data; + tlv_len = skb->len; + + while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) { + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_TX_DONE_RAW: + txs = (struct mt7925_mcu_txs_event *)tlv->data; + mt7925_mac_add_txs(dev, txs->data); + break; + default: + break; + } + tlv_len -= le16_to_cpu(tlv->len); + tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len)); + } +} + +static void +mt7925_mcu_uni_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_uni_debug_msg { + __le16 tag; + __le16 len; + u8 fmt; + u8 rsv[3]; + u8 id; + u8 type:3; + u8 nr_args:5; + union { + struct idxlog { + __le16 rsv; + __le32 ts; + __le32 idx; + u8 data[]; + } __packed idx; + struct txtlog { + u8 len; + u8 rsv; + __le32 ts; + u8 data[]; + } __packed txt; + }; + } __packed * hdr; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + hdr = (struct mt7925_uni_debug_msg *)skb->data; + + if (hdr->id == 0x28) { + skb_pull(skb, offsetof(struct mt7925_uni_debug_msg, id)); + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", skb->len, skb->data); + return; + } else if (hdr->id != 0xa8) { + return; + } + + if (hdr->type == 0) { /* idx log */ + int i, ret, len = PAGE_SIZE - 1, nr_val; + struct page *page = dev_alloc_pages(get_order(len)); + __le32 *val; + char *buf, *cur; + + if (!page) + return; + + buf = page_address(page); + cur = buf; + + nr_val = (le16_to_cpu(hdr->len) - sizeof(*hdr)) / 4; + val = (__le32 *)hdr->idx.data; + for (i = 0; i < nr_val && len > 0; i++) { + ret = snprintf(cur, len, "0x%x,", le32_to_cpu(val[i])); + if (ret <= 0) + break; + + cur += ret; + len -= ret; + } + if (cur > buf) + wiphy_info(mt76_hw(dev)->wiphy, "idx: 0x%X,%d,%s", + le32_to_cpu(hdr->idx.idx), nr_val, buf); + 
put_page(page); + } else if (hdr->type == 2) { /* str log */ + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", hdr->txt.len, hdr->txt.data); + } +} + +static void +mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev, + struct sk_buff *skb) +{ + struct mt7925_mcu_rxd *rxd; + + rxd = (struct mt7925_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case MCU_UNI_EVENT_FW_LOG_2_HOST: + mt7925_mcu_uni_debug_msg_event(dev, skb); + break; + case MCU_UNI_EVENT_ROC: + mt7925_mcu_uni_roc_event(dev, skb); + break; + case MCU_UNI_EVENT_SCAN_DONE: + mt7925_mcu_scan_event(dev, skb); + return; + case MCU_UNI_EVENT_TX_DONE: + mt7925_mcu_tx_done_event(dev, skb); + break; + case MCU_UNI_EVENT_BSS_BEACON_LOSS: + mt7925_mcu_connection_loss_event(dev, skb); + break; + case MCU_UNI_EVENT_COREDUMP: + dev->fw_assert = true; + mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump); + return; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_mcu_rxd *rxd = (struct mt7925_mcu_rxd *)skb->data; + + if (skb_linearize(skb)) + return; + + if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) { + mt7925_mcu_uni_rx_unsolicited_event(dev, skb); + return; + } + + mt76_mcu_rx_event(&dev->mt76, skb); +} + +static int +mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv; + struct sta_rec_ba_uni *ba; + struct sk_buff *skb; + struct tlv *tlv; + int len; + + len = sizeof(struct sta_req_hdr) + sizeof(*ba); + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba)); + + ba = (struct sta_rec_ba_uni *)tlv; + ba->ba_type = tx ? 
MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT; + ba->winsize = cpu_to_le16(params->buf_size); + ba->ssn = cpu_to_le16(params->ssn); + ba->ba_en = enable << params->tid; + ba->amsdu = params->amsdu; + ba->tid = params->tid; + + return mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); +} + +/** starec & wtbl **/ +int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; + struct mt792x_vif *mvif = msta->vif; + + if (enable && !params->amsdu) + msta->wcid.amsdu = false; + + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + enable, true); +} + +int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; + struct mt792x_vif *mvif = msta->vif; + + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + enable, false); +} + +static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) +{ + const struct mt76_connac2_fw_trailer *hdr; + const struct mt76_connac2_fw_region *region; + const struct mt7925_clc *clc; + struct mt76_dev *mdev = &dev->mt76; + struct mt792x_phy *phy = &dev->phy; + const struct firmware *fw; + int ret, i, len, offset = 0; + u8 *clc_base = NULL; + + if (mt7925_disable_clc || + mt76_is_usb(&dev->mt76)) + return 0; + + ret = request_firmware(&fw, fw_name, mdev->dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(mdev->dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); + for (i = 0; i < hdr->n_region; i++) { + region = (const void *)((const u8 *)hdr - + (hdr->n_region - i) * sizeof(*region)); + len = le32_to_cpu(region->len); + + /* check if we have valid buffer size */ + if (offset + len > fw->size) { + dev_err(mdev->dev, "Invalid firmware region\n"); + ret = -EINVAL; + goto out; + } + + if ((region->feature_set & FW_FEATURE_NON_DL) && + region->type == FW_TYPE_CLC) { + clc_base = (u8 *)(fw->data + offset); + break; + } + offset += len; + } + + if (!clc_base) + goto out; + + for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) { + clc = (const struct mt7925_clc *)(clc_base + offset); + + /* do not init buf again if chip reset triggered */ + if (phy->clc[clc->idx]) + continue; + + phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc, + le32_to_cpu(clc->len), + GFP_KERNEL); + + if (!phy->clc[clc->idx]) { + ret = -ENOMEM; + goto out; + } + } + + ret = mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR); +out: + release_firmware(fw); + + return ret; +} + +int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) +{ + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + u8 ctrl; + u8 interval; + u8 _rsv2[2]; + } __packed req = { + .tag = cpu_to_le16(UNI_WSYS_CONFIG_FW_LOG_CTRL), + .len = cpu_to_le16(sizeof(req) - 4), + .ctrl = ctrl, + }; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG), + &req, sizeof(req), false, NULL); + return ret; +} + +static void +mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_dev *mdev = mphy->dev; + struct mt7925_mcu_phy_cap { + u8 ht; + u8 vht; + u8 _5g; + u8 max_bw; + u8 nss; + u8 dbdc; + u8 tx_ldpc; + u8 rx_ldpc; + u8 tx_stbc; + u8 rx_stbc; + u8 hw_path; + u8 he; + u8 eht; + } __packed * cap; + enum { + WF0_24G, + WF0_5G + }; + + cap = (struct 
mt7925_mcu_phy_cap *)data; + + mdev->phy.antenna_mask = BIT(cap->nss) - 1; + mdev->phy.chainmask = mdev->phy.antenna_mask; + mdev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); + mdev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); + dev->has_eht = cap->eht; +} + +static int +mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + } __packed req = { + .tag = cpu_to_le16(UNI_CHIP_CONFIG_NIC_CAPA), + .len = cpu_to_le16(sizeof(req) - 4), + }; + struct mt76_connac_cap_hdr { + __le16 n_element; + u8 rsv[2]; + } __packed * hdr; + struct sk_buff *skb; + int ret, i; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(CHIP_CONFIG), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + hdr = (struct mt76_connac_cap_hdr *)skb->data; + if (skb->len < sizeof(*hdr)) { + ret = -EINVAL; + goto out; + } + + skb_pull(skb, sizeof(*hdr)); + + for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { + struct tlv *tlv = (struct tlv *)skb->data; + int len; + + if (skb->len < sizeof(*tlv)) + break; + + len = le16_to_cpu(tlv->len); + if (skb->len < len) + break; + + switch (le16_to_cpu(tlv->tag)) { + case MT_NIC_CAP_6G: + mphy->cap.has_6ghz = !!tlv->data[0]; + break; + case MT_NIC_CAP_MAC_ADDR: + memcpy(mphy->macaddr, (void *)tlv->data, ETH_ALEN); + break; + case MT_NIC_CAP_PHY: + mt7925_mcu_parse_phy_cap(dev, tlv->data); + break; + default: + break; + } + skb_pull(skb, len); + } +out: + dev_kfree_skb(skb); + return ret; +} + +int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd) +{ + u16 len = strlen(cmd) + 1; + struct { + u8 _rsv[4]; + __le16 tag; + __le16 len; + struct mt76_connac_config config; + } __packed req = { + .tag = cpu_to_le16(UNI_CHIP_CONFIG_CHIP_CFG), + .len = cpu_to_le16(sizeof(req) - 4), + .config = { + .resp_type = 0, + .type = 0, + .data_size = cpu_to_le16(len), + }, + }; + + memcpy(req.config.data, cmd, len); + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHIP_CONFIG), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable) +{ + char cmd[16]; + + snprintf(cmd, sizeof(cmd), "KeepFullPwr %d", !enable); + + return mt7925_mcu_chip_config(dev, cmd); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep); + +int mt7925_run_firmware(struct mt792x_dev *dev) +{ + int err; + + err = mt792x_load_firmware(dev); + if (err) + return err; + + err = mt7925_mcu_get_nic_capability(dev); + if (err) + return err; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + err = mt7925_load_clc(dev, mt792x_ram_name(dev)); + if (err) + return err; + + return mt7925_mcu_fw_log_2_host(dev, 1); +} +EXPORT_SYMBOL_GPL(mt7925_run_firmware); + +static void +mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct sta_rec_hdr_trans *hdr_trans; + struct mt76_wcid *wcid; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDR_TRANS, sizeof(*hdr_trans)); + hdr_trans = (struct sta_rec_hdr_trans *)tlv; + hdr_trans->dis_rx_hdr_tran = true; + + if (vif->type == NL80211_IFTYPE_STATION) + hdr_trans->to_ds = true; + else + hdr_trans->from_ds = true; + + wcid = (struct mt76_wcid *)sta->drv_priv; + if (!wcid) + return; + + hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); + if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) { + hdr_trans->to_ds = true; + hdr_trans->from_ds = true; + } +} + +int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, + struct 
ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta; + struct sk_buff *skb; + + msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* starec hdr trans */ + mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +} + +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) +{ +#define MCU_EDCA_AC_PARAM 0 +#define WMM_AIFS_SET BIT(0) +#define WMM_CW_MIN_SET BIT(1) +#define WMM_CW_MAX_SET BIT(2) +#define WMM_TXOP_SET BIT(3) +#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ + WMM_CW_MAX_SET | WMM_TXOP_SET) + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 __rsv[3]; + } __packed hdr = { + .bss_idx = mvif->mt76.idx, + }; + struct sk_buff *skb; + int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca); + int ac; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &hdr, sizeof(hdr)); + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct edca *e; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, MCU_EDCA_AC_PARAM, sizeof(*e)); + + e = (struct edca *)tlv; + e->set = WMM_PARAM_SET; + e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS; + e->aifs = q->aifs; + e->txop = cpu_to_le16(q->txop); + + if (q->cw_min) + e->cw_min = fls(q->cw_min); + else + e->cw_min = 5; + + if (q->cw_max) + e->cw_max = fls(q->cw_max); + else + e->cw_max = 10; + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(EDCA_UPDATE), true); +} + +static int +mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct sk_buff *skb, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd) +{ + struct sta_rec_sec_uni *sec; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec)); + sec = (struct sta_rec_sec_uni *)tlv; + sec->add = cmd; + + if (cmd == SET_KEY) { + struct sec_key_uni *sec_key; + u8 cipher; + + cipher = mt76_connac_mcu_get_cipher(key->cipher); + if (cipher == MCU_CIPHER_NONE) + return -EOPNOTSUPP; + + sec_key = &sec->key[0]; + sec_key->cipher_len = sizeof(*sec_key); + + if (cipher == MCU_CIPHER_BIP_CMAC_128) { + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = MCU_CIPHER_AES_CCMP; + sec_key->key_id = sta_key_conf->keyidx; + sec_key->key_len = 16; + memcpy(sec_key->key, sta_key_conf->key, 16); + + sec_key = &sec->key[1]; + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128; + sec_key->cipher_len = sizeof(*sec_key); + sec_key->key_len = 16; + memcpy(sec_key->key, key->key, 16); + sec->n_cipher = 2; + } else { + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = cipher; + sec_key->key_id = key->keyidx; + sec_key->key_len = key->keylen; + memcpy(sec_key->key, key->key, key->keylen); + + if (cipher == MCU_CIPHER_TKIP) { + /* Rx/Tx MIC keys are swapped */ + memcpy(sec_key->key + 16, key->key + 24, 8); + memcpy(sec_key->key + 24, key->key + 16, 8); + } + + /* store key_conf for BIP batch update */ + if (cipher == MCU_CIPHER_AES_CCMP) { + memcpy(sta_key_conf->key, key->key, key->keylen); + sta_key_conf->keyidx = 
key->keyidx; + } + + sec->n_cipher = 1; + } + } else { + sec->n_cipher = 0; + } + + return 0; +} + +int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct ieee80211_key_conf *key, int mcu_cmd, + struct mt76_wcid *wcid, enum set_key_cmd cmd) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct sk_buff *skb; + int ret; + + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd); + if (ret) + return ret; + + return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); +} + +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + struct ieee80211_channel *chan, int duration, + enum mt7925_roc_req type, u8 token_id) +{ + int center_ch = ieee80211_frequency_to_channel(chan->center_freq); + struct mt792x_dev *dev = phy->dev; + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_acquire_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 control_channel; + u8 sco; + u8 band; + u8 bw; + u8 center_chan; + u8 center_chan2; + u8 bw_from_ap; + u8 center_chan_from_ap; + u8 center_chan2_from_ap; + u8 reqtype; + __le32 maxinterval; + u8 dbdcband; + u8 rsv[3]; + } __packed roc; + } __packed req = { + .roc = { + .tag = cpu_to_le16(UNI_ROC_ACQUIRE), + .len = cpu_to_le16(sizeof(struct roc_acquire_tlv)), + .tokenid = token_id, + .reqtype = type, + .maxinterval = cpu_to_le32(duration), + .bss_idx = vif->mt76.idx, + .control_channel = chan->hw_value, + .bw = CMD_CBW_20MHZ, + .bw_from_ap = CMD_CBW_20MHZ, + .center_chan = center_ch, + .center_chan_from_ap = center_ch, + .dbdcband = 0xff, /* auto */ + }, + }; + + if (chan->hw_value < center_ch) + req.roc.sco = 1; /* SCA */ + else if (chan->hw_value > center_ch) + req.roc.sco = 3; /* SCB */ + + switch (chan->band) { + case NL80211_BAND_6GHZ: + req.roc.band = 3; + break; + case NL80211_BAND_5GHZ: + req.roc.band = 2; + break; + default: + req.roc.band = 1; + break; + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + u8 token_id) +{ + struct mt792x_dev *dev = phy->dev; + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_abort_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 dbdcband; + u8 rsv[5]; + } __packed abort; + } __packed req = { + .abort = { + .tag = cpu_to_le16(UNI_ROC_ABORT), + .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), + .tokenid = token_id, + .bss_idx = vif->mt76.idx, + .dbdcband = 0xff, /* auto*/ + }, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag) +{ + static const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 0, + [NL80211_BAND_5GHZ] = 1, + [NL80211_BAND_6GHZ] = 2, + }; + struct mt792x_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1; + u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ; + struct { + /* fixed field */ + u8 __rsv[4]; + + __le16 tag; + __le16 len; + u8 control_ch; + u8 center_ch; + u8 bw; + u8 tx_path_num; + u8 rx_path; /* mask or num */ + u8 switch_reason; + u8 band_idx; + u8 center_ch2; /* for 80+80 only */ + __le16 cac_case; + u8 channel_band; + u8 rsv0; + __le32 outband_freq; + u8 txpower_drop; + u8 ap_bw; + u8 ap_center_ch; + u8 
rsv1[53]; + } __packed req = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(sizeof(req) - 4), + .control_ch = chandef->chan->hw_value, + .center_ch = ieee80211_frequency_to_channel(freq1), + .bw = mt76_connac_chan_bw(chandef), + .tx_path_num = hweight8(phy->mt76->antenna_mask), + .rx_path = phy->mt76->antenna_mask, + .band_idx = band_idx, + .channel_band = ch_band[chandef->chan->band], + }; + + if (chandef->chan->band == NL80211_BAND_6GHZ) + req.channel_band = 2; + else + req.channel_band = chandef->chan->band; + + if (tag == UNI_CHANNEL_RX_PATH || + dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + req.switch_reason = CH_SWITCH_NORMAL; + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; + else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, + NL80211_IFTYPE_AP)) + req.switch_reason = CH_SWITCH_DFS; + else + req.switch_reason = CH_SWITCH_NORMAL; + + if (tag == UNI_CHANNEL_SWITCH) + req.rx_path = hweight8(req.rx_path); + + if (chandef->width == NL80211_CHAN_WIDTH_80P80) { + int freq2 = chandef->center_freq2; + + req.center_ch2 = ieee80211_frequency_to_channel(freq2); + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) +{ + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + u8 buffer_mode; + u8 format; + __le16 buf_len; + } __packed req = { + .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE), + .len = cpu_to_le16(sizeof(req) - 4), + .buffer_mode = EE_MODE_EFUSE, + .format = EE_FORMAT_WHOLE + }; + + return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL), + &req, sizeof(req), false, NULL); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); + +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct ps_tlv { + __le16 tag; + __le16 len; + u8 ps_state; /* 0: device awake + * 1: static power save + * 2: dynamic power saving + * 3: enter TWT power saving + * 4: leave TWT power saving + */ + u8 pad[3]; + } __packed ps; + } __packed ps_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .ps = { + .tag = cpu_to_le16(UNI_BSS_INFO_PS), + .len = cpu_to_le16(sizeof(struct ps_tlv)), + .ps_state = vif->cfg.ps ? 
2 : 0, + }, + }; + + if (vif->type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &ps_req, sizeof(ps_req), true); +} + +static int +mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcnft_tlv { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 dtim_period; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad[3]; + } __packed bcnft; + } __packed bcnft_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .bcnft = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), + .len = cpu_to_le16(sizeof(struct bcnft_tlv)), + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = vif->bss_conf.dtim_period, + }, + }; + + if (vif->type != NL80211_IFTYPE_STATION) + return 0; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &bcnft_req, sizeof(bcnft_req), true); +} + +int +mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcnft_tlv { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 dtim_period; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad[3]; + } __packed enable; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .enable = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), + .len = cpu_to_le16(sizeof(struct bcnft_tlv)), + .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + }, + }; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct pm_disable { + __le16 tag; + __le16 len; + } __packed disable; + } req1 = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .disable = { + .tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE), + .len = cpu_to_le16(sizeof(struct pm_disable)) + }, + }; + int err; + + err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req1, sizeof(req1), false); + if (err < 0 || !enable) + return err; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), false); +} + +static void +mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + if (!sta->deflink.he_cap.has_he) + return; + + mt76_connac_mcu_sta_he_tlv_v2(skb, sta); +} + +static void +mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_he_6g_capa *he_6g; + struct tlv *tlv; + + if (!sta->deflink.he_6ghz_capa.capa) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g)); + + he_6g = (struct sta_rec_he_6g_capa *)tlv; + he_6g->capa = sta->deflink.he_6ghz_capa.capa; +} + +static void +mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_eht_mcs_nss_supp *mcs_map; + struct ieee80211_eht_cap_elem_fixed *elem; + struct sta_rec_eht *eht; + struct tlv *tlv; + + if (!sta->deflink.eht_cap.has_eht) + return; + + mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp; + elem = &sta->deflink.eht_cap.eht_cap_elem; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht)); + + eht = (struct sta_rec_eht *)tlv; + eht->tid_bitmap = 0xff; + eht->mac_cap = cpu_to_le16(*(u16 *)elem->mac_cap_info); + eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info); + eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]); + + if 
(sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20)); + memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80)); + memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160)); +} + +static void +mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_ht *ht; + struct tlv *tlv; + + if (!sta->deflink.ht_cap.ht_supported) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); + + ht = (struct sta_rec_ht *)tlv; + ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); +} + +static void +mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_vht *vht; + struct tlv *tlv; + + /* For 6G band, this tlv is necessary to let hw work normally */ + if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); + + vht = (struct sta_rec_vht *)tlv; + vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); + vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; +} + +static void +mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct sta_rec_amsdu *amsdu; + struct tlv *tlv; + + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return; + + if (!sta->deflink.agg.max_amsdu_len) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); + amsdu = (struct sta_rec_amsdu *)tlv; + amsdu->max_amsdu_num = 8; + amsdu->amsdu_en = true; + msta->wcid.amsdu = true; + + switch (sta->deflink.agg.max_amsdu_len) { + case IEEE80211_MAX_MPDU_LEN_VHT_11454: + amsdu->max_mpdu_size = + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + return; + case IEEE80211_MAX_MPDU_LEN_HT_7935: + case IEEE80211_MAX_MPDU_LEN_VHT_7991: + amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; + return; + default: + amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; + return; + } +} + +static void +mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + struct sta_rec_phy *phy; + struct tlv *tlv; + u8 af = 0, mm = 0; + + if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); + phy = (struct sta_rec_phy *)tlv; + phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta); + if (sta->deflink.ht_cap.ht_supported) { + af = sta->deflink.ht_cap.ampdu_factor; + mm = sta->deflink.ht_cap.ampdu_density; + } + + if (sta->deflink.vht_cap.vht_supported) { + u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, + sta->deflink.vht_cap.cap); + + af = max_t(u8, af, vht_af); + } + + if (sta->deflink.he_6ghz_capa.capa) { + af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + } + + phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, af) | + FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY, mm); + phy->max_ampdu_len = af; +} + +static void +mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, 
struct sk_buff *skb, + struct ieee80211_sta *sta, + struct ieee80211_vif *vif, + u8 rcpi, u8 sta_state) +{ + struct sta_rec_state_v2 { + __le16 tag; + __le16 len; + u8 state; + u8 rsv[3]; + __le32 flags; + u8 vht_opmode; + u8 action; + u8 rsv2[2]; + } __packed * state; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state)); + state = (struct sta_rec_state_v2 *)tlv; + state->state = sta_state; + + if (sta->deflink.vht_cap.vht_supported) { + state->vht_opmode = sta->deflink.bandwidth; + state->vht_opmode |= sta->deflink.rx_nss << + IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; + } +} + +static void +mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + enum nl80211_band band = chandef->chan->band; + struct sta_rec_ra_info *ra_info; + struct tlv *tlv; + u16 supp_rates; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); + ra_info = (struct sta_rec_ra_info *)tlv; + + supp_rates = sta->deflink.supp_rates[band]; + if (band == NL80211_BAND_2GHZ) + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | + FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); + else + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates); + + ra_info->legacy = cpu_to_le16(supp_rates); + + if (sta->deflink.ht_cap.ht_supported) + memcpy(ra_info->rx_mcs_bitmask, + sta->deflink.ht_cap.mcs.rx_mask, + HT_MCS_MASK_NUM); +} + +static void +mt7925_mcu_sta_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + struct sta_rec_mld *mld; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld)); + mld = (struct sta_rec_mld *)tlv; + memcpy(mld->mac_addr, vif->addr, ETH_ALEN); + mld->primary_id = cpu_to_le16(wcid->idx); + mld->wlan_id = cpu_to_le16(wcid->idx); + + /* TODO: 0 means deflink only, add secondary link(1) later */ + mld->link_num = !!(hweight8(vif->active_links) > 1); + WARN_ON_ONCE(mld->link_num); +} + +static int +mt7925_mcu_sta_cmd(struct mt76_phy *phy, + struct mt76_sta_cmd_info *info) +{ + struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + if (info->sta || !info->offload_fw) + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, + info->enable, info->newly); + if (info->sta && info->enable) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_ht_tlv(skb, info->sta); + mt7925_mcu_sta_vht_tlv(skb, info->sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_he_tlv(skb, info->sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->sta); + mt7925_mcu_sta_eht_tlv(skb, info->sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta, + info->vif, info->rcpi, + info->state); + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta); + } + + sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, + sizeof(struct tlv)); + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid, + WTBL_RESET_AND_SET, + sta_wtbl, &skb); + if 
(IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + if (info->enable) { + mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif, + info->sta, sta_wtbl, + wtbl_hdr); + mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid, + sta_wtbl, wtbl_hdr); + if (info->sta) + mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta, + sta_wtbl, wtbl_hdr, + true, true); + } + + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); +} + +int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable, + enum mt76_sta_info_state state) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + int rssi = -ewma_rssi_read(&mvif->rssi); + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = enable, + .cmd = MCU_UNI_CMD(STA_REC_UPDATE), + .state = state, + .offload_fw = true, + .rcpi = to_rcpi(rssi), + }; + struct mt792x_sta *msta; + + msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; + info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; + info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; + + return mt7925_mcu_sta_cmd(&dev->mphy, &info); +} + +int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + bool enable) +{ +#define MT7925_FIF_BIT_CLR BIT(1) +#define MT7925_FIF_BIT_SET BIT(0) + int err = 0; + + if (enable) { + err = mt7925_mcu_uni_bss_bcnft(dev, vif, true); + if (err) + return err; + + return mt7925_mcu_set_rxfilter(dev, 0, + MT7925_FIF_BIT_SET, + MT_WF_RFCR_DROP_OTHER_BEACON); + } + + err = mt7925_mcu_set_bss_pm(dev, vif, false); + if (err) + return err; + + return mt7925_mcu_set_rxfilter(dev, 0, + MT7925_FIF_BIT_CLR, + MT_WF_RFCR_DROP_OTHER_BEACON); +} + +int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txpwr *txpwr) +{ +#define TX_POWER_SHOW_INFO 0x7 +#define TXPOWER_ALL_RATE_POWER_INFO 0x2 + struct mt7925_txpwr_event *event; + struct mt7925_txpwr_req req = { + .tag = cpu_to_le16(TX_POWER_SHOW_INFO), + .len = cpu_to_le16(sizeof(req) - 4), + .catg = TXPOWER_ALL_RATE_POWER_INFO, + .band_idx = band_idx, + }; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(TXPOWER), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + event = (struct mt7925_txpwr_event *)skb->data; + memcpy(txpwr, &event->txpwr, sizeof(event->txpwr)); + + dev_kfree_skb(skb); + + return 0; +} + +int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + struct { + struct { + u8 band_idx; + u8 pad[3]; + } __packed hdr; + struct sniffer_enable_tlv { + __le16 tag; + __le16 len; + u8 enable; + u8 pad[3]; + } __packed enable; + } __packed req = { + .hdr = { + .band_idx = mvif->mt76.band_idx, + }, + .enable = { + .tag = cpu_to_le16(UNI_SNIFFER_ENABLE), + .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)), + .enable = enable, + }, + }; + + mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), + true); +} + +int mt7925_mcu_config_sniffer(struct mt792x_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt76_phy *mphy = vif->phy->mt76; + struct cfg80211_chan_def *chandef = ctx ? 
&ctx->def : &mphy->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + + const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 1, + [NL80211_BAND_5GHZ] = 2, + [NL80211_BAND_6GHZ] = 3, + }; + const u8 ch_width[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = 0, + [NL80211_CHAN_WIDTH_20] = 0, + [NL80211_CHAN_WIDTH_40] = 0, + [NL80211_CHAN_WIDTH_80] = 1, + [NL80211_CHAN_WIDTH_160] = 2, + [NL80211_CHAN_WIDTH_80P80] = 3, + [NL80211_CHAN_WIDTH_5] = 4, + [NL80211_CHAN_WIDTH_10] = 5, + [NL80211_CHAN_WIDTH_320] = 6, + }; + + struct { + struct { + u8 band_idx; + u8 pad[3]; + } __packed hdr; + struct config_tlv { + __le16 tag; + __le16 len; + u16 aid; + u8 ch_band; + u8 bw; + u8 control_ch; + u8 sco; + u8 center_ch; + u8 center_ch2; + u8 drop_err; + u8 pad[3]; + } __packed tlv; + } __packed req = { + .hdr = { + .band_idx = vif->mt76.band_idx, + }, + .tlv = { + .tag = cpu_to_le16(UNI_SNIFFER_CONFIG), + .len = cpu_to_le16(sizeof(req.tlv)), + .control_ch = chandef->chan->hw_value, + .center_ch = ieee80211_frequency_to_channel(freq1), + .drop_err = 1, + }, + }; + + if (chandef->chan->band < ARRAY_SIZE(ch_band)) + req.tlv.ch_band = ch_band[chandef->chan->band]; + if (chandef->width < ARRAY_SIZE(ch_width)) + req.tlv.bw = ch_width[chandef->width]; + + if (freq2) + req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2); + + if (req.tlv.control_ch < req.tlv.center_ch) + req.tlv.sco = 1; /* SCA */ + else if (req.tlv.control_ch > req.tlv.center_ch) + req.tlv.sco = 3; /* SCB */ + + return mt76_mcu_send_msg(mphy->dev, MCU_UNI_CMD(SNIFFER), + &req, sizeof(req), true); +} + +int +mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_mutable_offsets offs; + struct { + struct req_hdr { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcn_content_tlv { + __le16 tag; + __le16 len; + __le16 tim_ie_pos; + __le16 csa_ie_pos; + __le16 bcc_ie_pos; + /* 0: disable beacon offload + * 1: enable beacon offload + * 2: update probe respond offload + */ + u8 enable; + /* 0: legacy format (TXD + payload) + * 1: only cap field IE + */ + u8 type; + __le16 pkt_len; + u8 pkt[512]; + } __packed beacon_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .beacon_tlv = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), + .len = cpu_to_le16(sizeof(struct bcn_content_tlv)), + .enable = enable, + .type = 1, + }, + }; + struct sk_buff *skb; + u8 cap_offs; + + /* support enable/update process only + * disable flow would be handled in bss stop handler automatically + */ + if (!enable) + return -EOPNOTSUPP; + + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0); + if (!skb) + return -EINVAL; + + cap_offs = offsetof(struct ieee80211_mgmt, u.beacon.capab_info); + if (!skb_pull(skb, cap_offs)) { + dev_err(dev->mt76.dev, "beacon format err\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + if (skb->len > 512) { + dev_err(dev->mt76.dev, "beacon size limit exceed\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + memcpy(req.beacon_tlv.pkt, skb->data, skb->len); + req.beacon_tlv.pkt_len = cpu_to_le16(skb->len); + offs.tim_offset -= cap_offs; + req.beacon_tlv.tim_ie_pos = cpu_to_le16(offs.tim_offset); + + if (offs.cntdwn_counter_offs[0]) { + u16 csa_offs; + + csa_offs = offs.cntdwn_counter_offs[0] - cap_offs - 4; + req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs); + } + dev_kfree_skb(skb); + + return mt76_mcu_send_msg(&dev->mt76, 
MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_chanctx_conf *ctx) +{ + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + enum nl80211_band band = chandef->chan->band; + struct mt76_dev *mdev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 ht_op_info; + u8 sco; + u8 band; + u8 pad[3]; + } __packed rlm; + } __packed rlm_req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .rlm = { + .tag = cpu_to_le16(UNI_BSS_INFO_RLM), + .len = cpu_to_le16(sizeof(struct rlm_tlv)), + .control_channel = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .center_chan2 = ieee80211_frequency_to_channel(freq2), + .tx_streams = hweight8(phy->antenna_mask), + .ht_op_info = 4, /* set HT 40M allowed */ + .rx_streams = hweight8(phy->antenna_mask), + .band = band, + }, + }; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + rlm_req.rlm.bw = CMD_CBW_40MHZ; + break; + case NL80211_CHAN_WIDTH_80: + rlm_req.rlm.bw = CMD_CBW_80MHZ; + break; + case NL80211_CHAN_WIDTH_80P80: + rlm_req.rlm.bw = CMD_CBW_8080MHZ; + break; + case NL80211_CHAN_WIDTH_160: + rlm_req.rlm.bw = CMD_CBW_160MHZ; + break; + case NL80211_CHAN_WIDTH_5: + rlm_req.rlm.bw = CMD_CBW_5MHZ; + break; + case NL80211_CHAN_WIDTH_10: + rlm_req.rlm.bw = CMD_CBW_10MHZ; + break; + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + default: + rlm_req.rlm.bw = CMD_CBW_20MHZ; + rlm_req.rlm.ht_op_info = 0; + break; + } + + if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 1; /* SCA */ + else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 3; /* SCB */ + + return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req, + sizeof(rlm_req), true); +} + +static struct sk_buff * +__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len) +{ + struct bss_req_hdr hdr = { + .bss_idx = mvif->idx, + }; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(dev, NULL, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_put_data(skb, &hdr, sizeof(hdr)); + + return skb; +} + +static u8 +mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_sta_eht_cap *eht_cap; + __le16 capa = 0; + u8 mode = 0; + + if (sta) { + he_6ghz_capa = &sta->deflink.he_6ghz_capa; + eht_cap = &sta->deflink.eht_cap; + } else { + struct ieee80211_supported_band *sband; + + sband = phy->hw->wiphy->bands[band]; + capa = ieee80211_get_he_6ghz_capa(sband, vif->type); + he_6ghz_capa = (struct ieee80211_he_6ghz_capa *)&capa; + + eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type); + } + + switch (band) { + case NL80211_BAND_2GHZ: + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_24G; + break; + case NL80211_BAND_5GHZ: + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_5G; + break; + case NL80211_BAND_6GHZ: + if (he_6ghz_capa && he_6ghz_capa->capa) + mode |= PHY_MODE_AX_6G; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_6G; + break; + default: + break; + } + + return mode; +} + +static void +mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, + 
struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_chanctx_conf *ctx, + struct mt76_phy *phy, u16 wlan_idx, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + enum nl80211_band band = chandef->chan->band; + struct mt76_connac_bss_basic_tlv *basic_req; + u8 idx, basic_phy; + struct tlv *tlv; + int conn_type; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req)); + basic_req = (struct mt76_connac_bss_basic_tlv *)tlv; + + idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : + mvif->mt76.omac_idx; + basic_req->hw_bss_idx = idx; + + basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta); + + basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta); + basic_req->nonht_basic_phy = cpu_to_le16(basic_phy); + + memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN); + basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta); + basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); + basic_req->dtim_period = vif->bss_conf.dtim_period; + basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); + basic_req->sta_idx = cpu_to_le16(msta->wcid.idx); + basic_req->omac_idx = mvif->mt76.omac_idx; + basic_req->band_idx = mvif->mt76.band_idx; + basic_req->wmm_idx = mvif->mt76.wmm_idx; + basic_req->conn_state = !enable; + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + if (vif->p2p) + conn_type = CONNECTION_P2P_GO; + else + conn_type = CONNECTION_INFRA_AP; + basic_req->conn_type = cpu_to_le32(conn_type); + basic_req->active = enable; + break; + case NL80211_IFTYPE_STATION: + if (vif->p2p) + conn_type = CONNECTION_P2P_GC; + else + conn_type = CONNECTION_INFRA_STA; + basic_req->conn_type = cpu_to_le32(conn_type); + basic_req->active = true; + break; + case NL80211_IFTYPE_ADHOC: + basic_req->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic_req->active = true; + break; + default: + WARN_ON(1); + break; + } +} + +static void +mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct bss_sec_tlv { + __le16 tag; + __le16 len; + u8 mode; + u8 status; + u8 cipher; + u8 __rsv; + } __packed * sec; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_SEC, sizeof(*sec)); + sec = (struct bss_sec_tlv *)tlv; + + switch (mvif->cipher) { + case MCU_CIPHER_GCMP_256: + case MCU_CIPHER_GCMP: + sec->mode = MODE_WPA3_SAE; + sec->status = 8; + break; + case MCU_CIPHER_AES_CCMP: + sec->mode = MODE_WPA2_PSK; + sec->status = 6; + break; + case MCU_CIPHER_TKIP: + sec->mode = MODE_WPA2_PSK; + sec->status = 4; + break; + case MCU_CIPHER_WEP104: + case MCU_CIPHER_WEP40: + sec->mode = MODE_SHARED; + sec->status = 0; + break; + default: + sec->mode = MODE_OPEN; + sec->status = 1; + break; + } + + sec->cipher = mvif->cipher; +} + +static void +mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + enum nl80211_band band = chandef->chan->band; + struct bss_rate_tlv *bmc; + struct tlv *tlv; + u8 idx = mvif->mcast_rates_idx ? 
+ mvif->mcast_rates_idx : mvif->basic_rates_idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RATE, sizeof(*bmc)); + + bmc = (struct bss_rate_tlv *)tlv; + + bmc->short_preamble = (band == NL80211_BAND_2GHZ); + bmc->bc_fixed_rate = idx; + bmc->mc_fixed_rate = idx; +} + +static void +mt7925_mcu_bss_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + bool is_mld = ieee80211_vif_is_mld(vif); + struct bss_mld_tlv *mld; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld)); + mld = (struct bss_mld_tlv *)tlv; + + mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff; + mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff; + mld->own_mld_id = mvif->mt76.idx + 32; + mld->remap_idx = 0xff; + + if (sta) + memcpy(mld->mac_addr, sta->addr, ETH_ALEN); +} + +static void +mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt76_connac_bss_qos_tlv *qos; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos)); + qos = (struct mt76_connac_bss_qos_tlv *)tlv; + qos->qos = vif->bss_conf.qos; +} + +static void +mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct mt792x_phy *phy) +{ +#define DEFAULT_HE_PE_DURATION 4 +#define DEFAULT_HE_DURATION_RTS_THRES 1023 + const struct ieee80211_sta_he_cap *cap; + struct bss_info_uni_he *he; + struct tlv *tlv; + + cap = mt76_connac_get_he_phy_cap(phy->mt76, vif); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he)); + + he = (struct bss_info_uni_he *)tlv; + he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; + if (!he->he_pe_duration) + he->he_pe_duration = DEFAULT_HE_PE_DURATION; + + he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); + if (!he->he_rts_thres) + he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); + + he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80; + he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160; + he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; +} + +static void +mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + bool enable) +{ + struct bss_info_uni_bss_color *color; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BSS_COLOR, sizeof(*color)); + color = (struct bss_info_uni_bss_color *)tlv; + + color->enable = enable ? + vif->bss_conf.he_bss_color.enabled : 0; + color->bss_color = enable ? 
+ vif->bss_conf.he_bss_color.color : 0; +} + +int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + int enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = phy->dev; + struct sk_buff *skb; + int err; + + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + MT7925_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* bss_basic must be first */ + mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76, + mvif->sta.wcid.idx, enable); + mt7925_mcu_bss_sec_tlv(skb, vif); + + mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta); + mt7925_mcu_bss_qos_tlv(skb, vif); + mt7925_mcu_bss_mld_tlv(skb, vif, sta); + + if (vif->bss_conf.he_support) { + mt7925_mcu_bss_he_tlv(skb, vif, phy); + mt7925_mcu_bss_color_tlv(skb, vif, enable); + } + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); + if (err < 0) + return err; + + return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx); +} + +int mt7925_mcu_set_dbdc(struct mt76_phy *phy) +{ + struct mt76_dev *mdev = phy->dev; + + struct mbmc_conf_tlv *conf; + struct mbmc_set_req *hdr; + struct sk_buff *skb; + struct tlv *tlv; + int max_len, err; + + max_len = sizeof(*hdr) + sizeof(*conf); + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + hdr = (struct mbmc_set_req *)skb_put(skb, sizeof(*hdr)); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf)); + conf = (struct mbmc_conf_tlv *)tlv; + + conf->mbmc_en = 1; + conf->band = 0; /* unused */ + + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS), + false); + + return err; +} + +#define MT76_CONNAC_SCAN_CHANNEL_TIME 60 + +int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_scan_request *sreq = &scan_req->req; + int n_ssids = 0, err, i, duration; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt76_dev *mdev = phy->dev; + struct mt76_connac_mcu_scan_channel *chan; + struct sk_buff *skb; + + struct scan_hdr_tlv *hdr; + struct scan_req_tlv *req; + struct scan_ssid_tlv *ssid; + struct scan_bssid_tlv *bssid; + struct scan_chan_info_tlv *chan_info; + struct scan_ie_tlv *ie; + struct scan_misc_tlv *misc; + struct tlv *tlv; + int max_len; + + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + + sizeof(*bssid) + sizeof(*chan_info) + + sizeof(*misc) + sizeof(*ie); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + set_bit(MT76_HW_SCANNING, &phy->state); + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; + hdr->bss_idx = mvif->idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_REQ, sizeof(*req)); + req = (struct scan_req_tlv *)tlv; + req->scan_type = sreq->n_ssids ? 1 : 0; + req->probe_req_num = sreq->n_ssids ? 
2 : 0; + + duration = MT76_CONNAC_SCAN_CHANNEL_TIME; + /* increase channel time for passive scan */ + if (!sreq->n_ssids) + duration *= 2; + req->timeout_value = cpu_to_le16(sreq->n_channels * duration); + req->channel_min_dwell_time = cpu_to_le16(duration); + req->channel_dwell_time = cpu_to_le16(duration); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid)); + ssid = (struct scan_ssid_tlv *)tlv; + for (i = 0; i < sreq->n_ssids; i++) { + if (!sreq->ssids[i].ssid_len) + continue; + + ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, + sreq->ssids[i].ssid_len); + n_ssids++; + } + ssid->ssid_type = n_ssids ? BIT(2) : BIT(0); + ssid->ssids_num = n_ssids; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid)); + bssid = (struct scan_bssid_tlv *)tlv; + + memcpy(bssid->bssid, sreq->bssid, ETH_ALEN); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info)); + chan_info = (struct scan_chan_info_tlv *)tlv; + chan_info->channels_num = min_t(u8, sreq->n_channels, + ARRAY_SIZE(chan_info->channels)); + for (i = 0; i < chan_info->channels_num; i++) { + chan = &chan_info->channels[i]; + + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } + chan->channel_num = scan_list[i]->hw_value; + } + chan_info->channel_type = sreq->n_channels ? 4 : 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); + ie = (struct scan_ie_tlv *)tlv; + if (sreq->ie_len > 0) { + memcpy(ie->ies, sreq->ie, sreq->ie_len); + ie->ies_len = cpu_to_le16(sreq->ie_len); + } + + req->scan_func |= SCAN_FUNC_SPLIT_SCAN; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_MISC, sizeof(*misc)); + misc = (struct scan_misc_tlv *)tlv; + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + get_random_mask_addr(misc->random_mac, sreq->mac_addr, + sreq->mac_addr_mask); + req->scan_func |= SCAN_FUNC_RANDOM_MAC; + } + + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); + if (err < 0) + clear_bit(MT76_HW_SCANNING, &phy->state); + + return err; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_hw_scan); + +int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt76_connac_mcu_scan_channel *chan; + struct mt76_dev *mdev = phy->dev; + struct cfg80211_match_set *cfg_match; + struct cfg80211_ssid *cfg_ssid; + + struct scan_hdr_tlv *hdr; + struct scan_sched_req *req; + struct scan_ssid_tlv *ssid; + struct scan_chan_info_tlv *chan_info; + struct scan_ie_tlv *ie; + struct scan_sched_ssid_match_sets *match; + struct sk_buff *skb; + struct tlv *tlv; + int i, max_len; + + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + + sizeof(*chan_info) + sizeof(*ie) + + sizeof(*match); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; + hdr->bss_idx = mvif->idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SCHED_REQ, sizeof(*req)); + req = (struct scan_sched_req *)tlv; + req->version = 1; + + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + req->scan_func |= SCAN_FUNC_RANDOM_MAC; + + req->intervals_num = 
sreq->n_scan_plans; + for (i = 0; i < req->intervals_num; i++) + req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid)); + ssid = (struct scan_ssid_tlv *)tlv; + + ssid->ssids_num = sreq->n_ssids; + ssid->ssid_type = BIT(2); + for (i = 0; i < ssid->ssids_num; i++) { + cfg_ssid = &sreq->ssids[i]; + memcpy(ssid->ssids[i].ssid, cfg_ssid->ssid, cfg_ssid->ssid_len); + ssid->ssids[i].ssid_len = cpu_to_le32(cfg_ssid->ssid_len); + } + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID_MATCH_SETS, sizeof(*match)); + match = (struct scan_sched_ssid_match_sets *)tlv; + match->match_num = sreq->n_match_sets; + for (i = 0; i < match->match_num; i++) { + cfg_match = &sreq->match_sets[i]; + memcpy(match->match[i].ssid, cfg_match->ssid.ssid, + cfg_match->ssid.ssid_len); + match->match[i].rssi_th = cpu_to_le32(cfg_match->rssi_thold); + match->match[i].ssid_len = cfg_match->ssid.ssid_len; + } + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info)); + chan_info = (struct scan_chan_info_tlv *)tlv; + chan_info->channels_num = min_t(u8, sreq->n_channels, + ARRAY_SIZE(chan_info->channels)); + for (i = 0; i < chan_info->channels_num; i++) { + chan = &chan_info->channels[i]; + + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } + chan->channel_num = scan_list[i]->hw_value; + } + chan_info->channel_type = sreq->n_channels ? 4 : 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); + ie = (struct scan_ie_tlv *)tlv; + if (sreq->ie_len > 0) { + memcpy(ie->ies, sreq->ie, sreq->ie_len); + ie->ies_len = cpu_to_le16(sreq->ie_len); + } + + return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req); + +int +mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt76_dev *mdev = phy->dev; + struct scan_sched_enable *req; + struct scan_hdr_tlv *hdr; + struct sk_buff *skb; + struct tlv *tlv; + int max_len; + + max_len = sizeof(*hdr) + sizeof(*req); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = 0; + hdr->bss_idx = 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SCHED_ENABLE, sizeof(*req)); + req = (struct scan_sched_enable *)tlv; + req->active = !enable; + + if (enable) + set_bit(MT76_HW_SCHED_SCANNING, &phy->state); + else + clear_bit(MT76_HW_SCHED_SCANNING, &phy->state); + + return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); +} + +int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, + struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct { + struct scan_hdr { + u8 seq_num; + u8 bss_idx; + u8 pad[2]; + } __packed hdr; + struct scan_cancel_tlv { + __le16 tag; + __le16 len; + u8 is_ext_channel; + u8 rsv[3]; + } __packed cancel; + } req = { + .hdr = { + .seq_num = mvif->scan_seq_num, + .bss_idx = mvif->idx, + }, + .cancel = { + .tag = cpu_to_le16(UNI_SCAN_CANCEL), + .len = cpu_to_le16(sizeof(struct scan_cancel_tlv)), + }, + }; + + if (test_and_clear_bit(MT76_HW_SCANNING, &phy->state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(phy->hw, &info); + } + + return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ), + &req, sizeof(req), false); +} 
+EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan); + +int mt7925_mcu_set_channel_domain(struct mt76_phy *phy) +{ + int len, i, n_max_channels, n_2ch = 0, n_5ch = 0, n_6ch = 0; + struct { + struct { + u8 alpha2[4]; /* regulatory_request.alpha2 */ + u8 bw_2g; /* BW_20_40M 0 + * BW_20M 1 + * BW_20_40_80M 2 + * BW_20_40_80_160M 3 + * BW_20_40_80_8080M 4 + */ + u8 bw_5g; + u8 bw_6g; + u8 pad; + } __packed hdr; + struct n_chan { + __le16 tag; + __le16 len; + u8 n_2ch; + u8 n_5ch; + u8 n_6ch; + u8 pad; + } __packed n_ch; + } req = { + .hdr = { + .bw_2g = 0, + .bw_5g = 3, /* BW_20_40_80_160M */ + .bw_6g = 3, + }, + .n_ch = { + .tag = cpu_to_le16(2), + }, + }; + struct mt76_connac_mcu_chan { + __le16 hw_value; + __le16 pad; + __le32 flags; + } __packed channel; + struct mt76_dev *dev = phy->dev; + struct ieee80211_channel *chan; + struct sk_buff *skb; + + n_max_channels = phy->sband_2g.sband.n_channels + + phy->sband_5g.sband.n_channels + + phy->sband_6g.sband.n_channels; + len = sizeof(req) + n_max_channels * sizeof(channel); + + skb = mt76_mcu_msg_alloc(dev, NULL, len); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, sizeof(req)); + + for (i = 0; i < phy->sband_2g.sband.n_channels; i++) { + chan = &phy->sband_2g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_2ch++; + } + for (i = 0; i < phy->sband_5g.sband.n_channels; i++) { + chan = &phy->sband_5g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_5ch++; + } + for (i = 0; i < phy->sband_6g.sband.n_channels; i++) { + chan = &phy->sband_6g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_6ch++; + } + + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(req.hdr.alpha2)); + memcpy(req.hdr.alpha2, dev->alpha2, sizeof(dev->alpha2)); + req.n_ch.n_2ch = n_2ch; + req.n_ch.n_5ch = n_5ch; + req.n_ch.n_6ch = n_6ch; + len = sizeof(struct n_chan) + (n_2ch + n_5ch + n_6ch) * sizeof(channel); + req.n_ch.len = cpu_to_le16(len); + memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req)); + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO), + false); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain); + +static int +__mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap, + struct mt7925_clc *clc, u8 idx) +{ + struct mt7925_clc_segment *seg; + struct sk_buff *skb; + struct { + u8 rsv[4]; + __le16 tag; + __le16 len; + + u8 ver; + u8 pad0; + __le16 size; + u8 idx; + u8 env; + u8 acpi_conf; + u8 pad1; + u8 alpha2[2]; + u8 type[2]; + u8 rsvd[64]; + } __packed req = { + .tag = cpu_to_le16(0x3), + .len = cpu_to_le16(sizeof(req) - 4), + + .idx = idx, + .env = env_cap, + .acpi_conf = mt792x_acpi_get_flags(&dev->phy), + }; + int ret, valid_cnt = 0; + u8 i, *pos; + + if (!clc) + return 0; + + pos = clc->data + sizeof(*seg) * clc->nr_seg; + for (i = 0; i < clc->nr_country; i++) { + struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos; + + pos += sizeof(*rule); + if (rule->alpha2[0] != alpha2[0] || + rule->alpha2[1] != alpha2[1]) + continue; + + seg 
= (struct mt7925_clc_segment *)clc->data + + rule->seg_idx - 1; + + memcpy(req.alpha2, rule->alpha2, 2); + memcpy(req.type, rule->type, 2); + + req.size = cpu_to_le16(seg->len); + skb = __mt76_mcu_msg_alloc(&dev->mt76, &req, + le16_to_cpu(req.size) + sizeof(req), + sizeof(req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_put_data(skb, clc->data + seg->offset, seg->len); + + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(SET_POWER_LIMIT), + true); + if (ret < 0) + return ret; + valid_cnt++; + } + + if (!valid_cnt) + return -ENOENT; + + return 0; +} + +int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap) +{ + struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy; + int i, ret; + + /* submit all clc config */ + for (i = 0; i < ARRAY_SIZE(phy->clc); i++) { + ret = __mt7925_mcu_set_clc(dev, alpha2, env_cap, + phy->clc[i], i); + + /* If no country found, set "00" as default */ + if (ret == -ENOENT) + ret = __mt7925_mcu_set_clc(dev, "00", + ENVIRON_INDOOR, + phy->clc[i], i); + if (ret < 0) + return ret; + } + return 0; +} + +int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); + struct mt76_connac2_mcu_uni_txd *uni_txd; + struct mt76_connac2_mcu_txd *mcu_txd; + __le32 *txd; + u32 val; + u8 seq; + + /* TODO: make dynamic based on msg type */ + mdev->mcu.timeout = 20 * HZ; + + seq = ++mdev->mcu.msg_seq & 0xf; + if (!seq) + seq = ++mdev->mcu.msg_seq & 0xf; + + if (cmd == MCU_CMD(FW_SCATTER)) + goto exit; + + txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd); + txd = (__le32 *)skb_push(skb, txd_len); + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | + FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) | + FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0); + txd[0] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD); + txd[1] = cpu_to_le32(val); + + if (cmd & __MCU_CMD_FIELD_UNI) { + uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd; + uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); + uni_txd->option = MCU_CMD_UNI_EXT_ACK; + uni_txd->cid = cpu_to_le16(mcu_cmd); + uni_txd->s2d_index = MCU_S2D_H2N; + uni_txd->pkt_type = MCU_PKT_ID; + uni_txd->seq = seq; + + goto exit; + } + + mcu_txd = (struct mt76_connac2_mcu_txd *)txd; + mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd)); + mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, + MT_TX_MCU_PORT_RX_Q0)); + mcu_txd->pkt_type = MCU_PKT_ID; + mcu_txd->seq = seq; + mcu_txd->cid = mcu_cmd; + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd); + + if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) { + if (cmd & __MCU_CMD_FIELD_QUERY) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; + mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid; + } else { + mcu_txd->set_query = MCU_Q_NA; + } + + if (cmd & __MCU_CMD_FIELD_WA) + mcu_txd->s2d_index = MCU_S2D_H2C; + else + mcu_txd->s2d_index = MCU_S2D_H2N; + +exit: + if (wait_seq) + *wait_seq = seq; + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_fill_message); + +int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val) +{ + struct { + u8 band_idx; + u8 _rsv[3]; + + __le16 tag; + __le16 len; + __le32 len_thresh; + __le32 pkt_thresh; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_RTS_THRESHOLD), + .len = cpu_to_le16(sizeof(req) - 4), + .len_thresh = cpu_to_le32(val), + .pkt_thresh = cpu_to_le32(0x2), + }; + + return 
mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable) +{ + struct { + u8 band_idx; + u8 _rsv[3]; + + __le16 tag; + __le16 len; + u8 enable; + u8 _rsv2[3]; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_RADIO_ENABLE), + .len = cpu_to_le16(sizeof(req) - 4), + .enable = enable, + }; + + return mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} + +static void +mt7925_mcu_build_sku(struct mt76_dev *dev, s8 *sku, + struct mt76_power_limits *limits, + enum nl80211_band band) +{ + int i, offset = sizeof(limits->cck); + + memset(sku, 127, MT_CONNAC3_SKU_POWER_LIMIT); + + if (band == NL80211_BAND_2GHZ) { + /* cck */ + memcpy(sku, limits->cck, sizeof(limits->cck)); + } + + /* ofdm */ + memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm)); + offset += (sizeof(limits->ofdm) * 5); + + /* ht */ + for (i = 0; i < 2; i++) { + memcpy(&sku[offset], limits->mcs[i], 8); + offset += 8; + } + sku[offset++] = limits->mcs[0][0]; + + /* vht */ + for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) { + memcpy(&sku[offset], limits->mcs[i], + ARRAY_SIZE(limits->mcs[i])); + offset += 12; + } + + /* he */ + for (i = 0; i < ARRAY_SIZE(limits->ru); i++) { + memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i])); + offset += ARRAY_SIZE(limits->ru[i]); + } + + /* eht */ + for (i = 0; i < ARRAY_SIZE(limits->eht); i++) { + memcpy(&sku[offset], limits->eht[i], ARRAY_SIZE(limits->eht[i])); + offset += ARRAY_SIZE(limits->eht[i]); + } +} + +static int +mt7925_mcu_rate_txpower_band(struct mt76_phy *phy, + enum nl80211_band band) +{ + int tx_power, n_chan, last_ch, err = 0, idx = 0; + int i, sku_len, batch_size, batch_len = 3; + struct mt76_dev *dev = phy->dev; + static const u8 chan_list_2ghz[] = { + 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14 + }; + static const u8 chan_list_5ghz[] = { + 36, 38, 40, 42, 44, 46, 48, + 50, 52, 54, 56, 58, 60, 62, + 64, 100, 102, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, 124, + 126, 128, 132, 134, 136, 138, 140, + 142, 144, 149, 151, 153, 155, 157, + 159, 161, 165, 167 + }; + static const u8 chan_list_6ghz[] = { + 1, 3, 5, 7, 9, 11, 13, + 15, 17, 19, 21, 23, 25, 27, + 29, 33, 35, 37, 39, 41, 43, + 45, 47, 49, 51, 53, 55, 57, + 59, 61, 65, 67, 69, 71, 73, + 75, 77, 79, 81, 83, 85, 87, + 89, 91, 93, 97, 99, 101, 103, + 105, 107, 109, 111, 113, 115, 117, + 119, 121, 123, 125, 129, 131, 133, + 135, 137, 139, 141, 143, 145, 147, + 149, 151, 153, 155, 157, 161, 163, + 165, 167, 169, 171, 173, 175, 177, + 179, 181, 183, 185, 187, 189, 193, + 195, 197, 199, 201, 203, 205, 207, + 209, 211, 213, 215, 217, 219, 221, + 225, 227, 229, 233 + }; + struct mt76_power_limits *limits; + struct mt7925_sku_tlv *sku_tlbv; + const u8 *ch_list; + + sku_len = sizeof(*sku_tlbv); + tx_power = 2 * phy->hw->conf.power_level; + if (!tx_power) + tx_power = 127; + + if (band == NL80211_BAND_2GHZ) { + n_chan = ARRAY_SIZE(chan_list_2ghz); + ch_list = chan_list_2ghz; + last_ch = chan_list_2ghz[ARRAY_SIZE(chan_list_2ghz) - 1]; + } else if (band == NL80211_BAND_6GHZ) { + n_chan = ARRAY_SIZE(chan_list_6ghz); + ch_list = chan_list_6ghz; + last_ch = chan_list_6ghz[ARRAY_SIZE(chan_list_6ghz) - 1]; + } else { + n_chan = ARRAY_SIZE(chan_list_5ghz); + ch_list = chan_list_5ghz; + last_ch = chan_list_5ghz[ARRAY_SIZE(chan_list_5ghz) - 1]; + } + batch_size = DIV_ROUND_UP(n_chan, batch_len); + + limits = devm_kmalloc(dev->dev, 
sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + + sku_tlbv = devm_kmalloc(dev->dev, sku_len, GFP_KERNEL); + if (!sku_tlbv) { + devm_kfree(dev->dev, limits); + return -ENOMEM; + } + + for (i = 0; i < batch_size; i++) { + struct mt7925_tx_power_limit_tlv *tx_power_tlv; + int j, msg_len, num_ch; + struct sk_buff *skb; + + num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len; + msg_len = sizeof(*tx_power_tlv) + num_ch * sku_len; + skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); + if (!skb) { + err = -ENOMEM; + goto out; + } + + tx_power_tlv = (struct mt7925_tx_power_limit_tlv *) + skb_put(skb, sizeof(*tx_power_tlv)); + + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv->alpha2)); + memcpy(tx_power_tlv->alpha2, dev->alpha2, sizeof(dev->alpha2)); + tx_power_tlv->n_chan = num_ch; + tx_power_tlv->tag = cpu_to_le16(0x1); + tx_power_tlv->len = cpu_to_le16(sizeof(*tx_power_tlv)); + + switch (band) { + case NL80211_BAND_2GHZ: + tx_power_tlv->band = 1; + break; + case NL80211_BAND_6GHZ: + tx_power_tlv->band = 3; + break; + default: + tx_power_tlv->band = 2; + break; + } + + for (j = 0; j < num_ch; j++, idx++) { + struct ieee80211_channel chan = { + .hw_value = ch_list[idx], + .band = band, + }; + s8 reg_power, sar_power; + + reg_power = mt76_connac_get_ch_power(phy, &chan, + tx_power); + sar_power = mt76_get_sar_power(phy, &chan, reg_power); + + mt76_get_rate_power_limits(phy, &chan, limits, + sar_power); + + tx_power_tlv->last_msg = ch_list[idx] == last_ch; + sku_tlbv->channel = ch_list[idx]; + + mt7925_mcu_build_sku(dev, sku_tlbv->pwr_limit, + limits, band); + skb_put_data(skb, sku_tlbv, sku_len); + } + err = mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(SET_POWER_LIMIT), + true); + if (err < 0) + goto out; + } + +out: + devm_kfree(dev->dev, sku_tlbv); + devm_kfree(dev->dev, limits); + return err; +} + +int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy) +{ + int err; + + if (phy->cap.has_2ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_2GHZ); + if (err < 0) + return err; + } + + if (phy->cap.has_5ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_5GHZ); + if (err < 0) + return err; + } + + if (phy->cap.has_6ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_6GHZ); + if (err < 0) + return err; + } + + return 0; +} + +int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, + u8 bit_op, u32 bit_map) +{ + struct mt792x_phy *phy = &dev->phy; + struct { + u8 band_idx; + u8 rsv1[3]; + + __le16 tag; + __le16 len; + u8 mode; + u8 rsv2[3]; + __le32 fif; + __le32 bit_map; /* bit_* for bitmap update */ + u8 bit_op; + u8 pad[51]; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_SET_MAC80211_RX_FILTER), + .len = cpu_to_le16(sizeof(req) - 4), + + .mode = fif ? 0 : 1, + .fif = cpu_to_le32(fif), + .bit_map = cpu_to_le32(bit_map), + .bit_op = bit_op, + }; + + return mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h new file mode 100644 index 0000000000..3c41e21303 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -0,0 +1,537 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_MCU_H +#define __MT7925_MCU_H + +#include "../mt76_connac_mcu.h" + +/* ext event table */ +enum { + MCU_EXT_EVENT_RATE_REPORT = 0x87, +}; + +struct mt7925_mcu_eeprom_info { + __le32 addr; + __le32 valid; + u8 data[MT7925_EEPROM_BLOCK_SIZE]; +} __packed; + +#define MT_RA_RATE_NSS GENMASK(8, 6) +#define MT_RA_RATE_MCS GENMASK(3, 0) +#define MT_RA_RATE_TX_MODE GENMASK(12, 9) +#define MT_RA_RATE_DCM_EN BIT(4) +#define MT_RA_RATE_BW GENMASK(14, 13) + +struct mt7925_mcu_rxd { + __le32 rxd[8]; + + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + u8 option; + u8 __rsv; + + u8 ext_eid; + u8 __rsv1[2]; + u8 s2d_index; + + u8 tlv[]; +}; + +struct mt7925_mcu_uni_event { + u8 cid; + u8 pad[3]; + __le32 status; /* 0: success, others: fail */ +} __packed; + +enum { + MT_EBF = BIT(0), /* explicit beamforming */ + MT_IBF = BIT(1) /* implicit beamforming */ +}; + +struct mt7925_mcu_reg_event { + __le32 reg; + __le32 val; +} __packed; + +struct mt7925_mcu_ant_id_config { + u8 ant_id[4]; +} __packed; + +struct mt7925_txpwr_req { + u8 _rsv[4]; + __le16 tag; + __le16 len; + + u8 format_id; + u8 catg; + u8 band_idx; + u8 _rsv1; +} __packed; + +struct mt7925_txpwr_event { + u8 rsv[4]; + __le16 tag; + __le16 len; + + u8 catg; + u8 band_idx; + u8 ch_band; + u8 format; /* 0:Legacy, 1:HE */ + + /* Rate power info */ + struct mt7925_txpwr txpwr; + + s8 pwr_max; + s8 pwr_min; + u8 rsv1; +} __packed; + +enum { + TM_SWITCH_MODE, + TM_SET_AT_CMD, + TM_QUERY_AT_CMD, +}; + +enum { + MT7925_TM_NORMAL, + MT7925_TM_TESTMODE, + MT7925_TM_ICAP, + MT7925_TM_ICAP_OVERLAP, + MT7925_TM_WIFISPECTRUM, +}; + +struct mt7925_rftest_cmd { + u8 action; + u8 rsv[3]; + __le32 param0; + __le32 param1; +} __packed; + +struct mt7925_rftest_evt { + __le32 param0; + __le32 param1; +} __packed; + +enum { + UNI_CHANNEL_SWITCH, + UNI_CHANNEL_RX_PATH, +}; + +enum { + UNI_CHIP_CONFIG_CHIP_CFG = 0x2, + UNI_CHIP_CONFIG_NIC_CAPA = 0x3, +}; + +enum { + UNI_BAND_CONFIG_RADIO_ENABLE, + UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08, + UNI_BAND_CONFIG_SET_MAC80211_RX_FILTER = 0x0C, +}; + +enum { + UNI_WSYS_CONFIG_FW_LOG_CTRL, + UNI_WSYS_CONFIG_FW_DBG_CTRL, +}; + +enum { + UNI_EFUSE_ACCESS = 1, + UNI_EFUSE_BUFFER_MODE, + UNI_EFUSE_FREE_BLOCK, + UNI_EFUSE_BUFFER_RD, +}; + +enum { + UNI_CMD_ACCESS_REG_BASIC = 0x0, + UNI_CMD_ACCESS_RF_REG_BASIC, +}; + +enum { + UNI_MBMC_SETTING, +}; + +enum { + UNI_EVENT_SCAN_DONE_BASIC = 0, + UNI_EVENT_SCAN_DONE_CHNLINFO = 2, + UNI_EVENT_SCAN_DONE_NLO = 3, +}; + +struct mt7925_mcu_scan_chinfo_event { + u8 nr_chan; + u8 alpha2[3]; +} __packed; + +enum { + UNI_SCAN_REQ = 1, + UNI_SCAN_CANCEL = 2, + UNI_SCAN_SCHED_REQ = 3, + UNI_SCAN_SCHED_ENABLE = 4, + UNI_SCAN_SSID = 10, + UNI_SCAN_BSSID, + UNI_SCAN_CHANNEL, + UNI_SCAN_IE, + UNI_SCAN_MISC, + UNI_SCAN_SSID_MATCH_SETS, +}; + +enum { + UNI_SNIFFER_ENABLE, + UNI_SNIFFER_CONFIG, +}; + +struct scan_hdr_tlv { + /* fixed field */ + u8 seq_num; + u8 bss_idx; + u8 pad[2]; + /* tlv */ + u8 data[]; +} __packed; + +struct scan_req_tlv { + __le16 tag; + __le16 len; + + u8 scan_type; /* 0: PASSIVE SCAN + * 1: ACTIVE SCAN + */ + u8 probe_req_num; /* Number of probe request for each SSID */ + u8 scan_func; /* BIT(0) Enable random MAC scan + * BIT(1) Disable DBDC scan type 1~3. + * BIT(2) Use DBDC scan type 3 (dedicated one RF to scan). 
+ */ + u8 src_mask; + __le16 channel_min_dwell_time; + __le16 channel_dwell_time; /* channel Dwell interval */ + __le16 timeout_value; + __le16 probe_delay_time; + u8 func_mask_ext; +}; + +struct scan_ssid_tlv { + __le16 tag; + __le16 len; + + u8 ssid_type; /* BIT(0) wildcard SSID + * BIT(1) P2P wildcard SSID + * BIT(2) specified SSID + wildcard SSID + * BIT(2) + ssid_type_ext BIT(0) specified SSID only + */ + u8 ssids_num; + u8 pad[2]; + struct mt76_connac_mcu_scan_ssid ssids[4]; +}; + +struct scan_bssid_tlv { + __le16 tag; + __le16 len; + + u8 bssid[ETH_ALEN]; + u8 match_ch; + u8 match_ssid_ind; + u8 rcpi; + u8 pad[3]; +}; + +struct scan_chan_info_tlv { + __le16 tag; + __le16 len; + + u8 channel_type; /* 0: Full channels + * 1: Only 2.4GHz channels + * 2: Only 5GHz channels + * 3: P2P social channel only (channel #1, #6 and #11) + * 4: Specified channels + * Others: Reserved + */ + u8 channels_num; /* valid when channel_type is 4 */ + u8 pad[2]; + struct mt76_connac_mcu_scan_channel channels[64]; +}; + +struct scan_ie_tlv { + __le16 tag; + __le16 len; + + __le16 ies_len; + u8 band; + u8 pad; + u8 ies[MT76_CONNAC_SCAN_IE_LEN]; +}; + +struct scan_misc_tlv { + __le16 tag; + __le16 len; + + u8 random_mac[ETH_ALEN]; + u8 rsv[2]; +}; + +struct scan_sched_req { + __le16 tag; + __le16 len; + + u8 version; + u8 stop_on_match; + u8 intervals_num; + u8 scan_func; + __le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL]; +}; + +struct scan_sched_ssid_match_sets { + __le16 tag; + __le16 len; + + u8 match_num; + u8 rsv[3]; + + struct mt76_connac_mcu_scan_match match[MT76_CONNAC_MAX_SCAN_MATCH]; +}; + +struct scan_sched_enable { + __le16 tag; + __le16 len; + + u8 active; + u8 rsv[3]; +}; + +struct mbmc_set_req { + u8 pad[4]; + u8 data[]; +} __packed; + +struct mbmc_conf_tlv { + __le16 tag; + __le16 len; + + u8 mbmc_en; + u8 band; + u8 pad[2]; +} __packed; + +struct edca { + __le16 tag; + __le16 len; + + u8 queue; + u8 set; + u8 cw_min; + u8 cw_max; + __le16 txop; + u8 aifs; + u8 __rsv; +}; + +struct bss_req_hdr { + u8 bss_idx; + u8 __rsv[3]; +} __packed; + +struct bss_rate_tlv { + __le16 tag; + __le16 len; + u8 __rsv1[4]; + __le16 bc_trans; + __le16 mc_trans; + u8 short_preamble; + u8 bc_fixed_rate; + u8 mc_fixed_rate; + u8 __rsv2; +} __packed; + +struct bss_mld_tlv { + __le16 tag; + __le16 len; + u8 group_mld_id; + u8 own_mld_id; + u8 mac_addr[ETH_ALEN]; + u8 remap_idx; + u8 link_id; + u8 __rsv[2]; +} __packed; + +struct sta_rec_ba_uni { + __le16 tag; + __le16 len; + u8 tid; + u8 ba_type; + u8 amsdu; + u8 ba_en; + __le16 ssn; + __le16 winsize; + u8 ba_rdd_rro; + u8 __rsv[3]; +} __packed; + +struct sta_rec_eht { + __le16 tag; + __le16 len; + u8 tid_bitmap; + u8 _rsv; + __le16 mac_cap; + __le64 phy_cap; + __le64 phy_cap_ext; + u8 mcs_map_bw20[4]; + u8 mcs_map_bw80[3]; + u8 mcs_map_bw160[3]; + u8 mcs_map_bw320[3]; + u8 _rsv2[3]; +} __packed; + +struct sec_key_uni { + __le16 wlan_idx; + u8 mgmt_prot; + u8 cipher_id; + u8 cipher_len; + u8 key_id; + u8 key_len; + u8 need_resp; + u8 key[32]; +} __packed; + +struct sta_rec_sec_uni { + __le16 tag; + __le16 len; + u8 add; + u8 n_cipher; + u8 rsv[2]; + + struct sec_key_uni key[2]; +} __packed; + +struct sta_rec_hdr_trans { + __le16 tag; + __le16 len; + u8 from_ds; + u8 to_ds; + u8 dis_rx_hdr_tran; + u8 rsv; +} __packed; + +struct sta_rec_mld { + __le16 tag; + __le16 len; + u8 mac_addr[ETH_ALEN]; + __le16 primary_id; + __le16 secondary_id; + __le16 wlan_id; + u8 link_num; + u8 rsv[3]; + struct { + __le16 wlan_id; + u8 bss_idx; + u8 rsv; + } __packed link[2]; 
+} __packed; + +#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_bf) + \ + sizeof(struct sta_rec_ht) + \ + sizeof(struct sta_rec_he_v2) + \ + sizeof(struct sta_rec_ba_uni) + \ + sizeof(struct sta_rec_vht) + \ + sizeof(struct sta_rec_uapsd) + \ + sizeof(struct sta_rec_amsdu) + \ + sizeof(struct sta_rec_bfee) + \ + sizeof(struct sta_rec_phy) + \ + sizeof(struct sta_rec_ra) + \ + sizeof(struct sta_rec_sec) + \ + sizeof(struct sta_rec_ra_fixed) + \ + sizeof(struct sta_rec_he_6g_capa) + \ + sizeof(struct sta_rec_eht) + \ + sizeof(struct sta_rec_hdr_trans) + \ + sizeof(struct sta_rec_mld) + \ + sizeof(struct tlv)) + +#define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \ + sizeof(struct mt76_connac_bss_basic_tlv) + \ + sizeof(struct mt76_connac_bss_qos_tlv) + \ + sizeof(struct bss_rate_tlv) + \ + sizeof(struct bss_mld_tlv) + \ + sizeof(struct bss_info_uni_he) + \ + sizeof(struct bss_info_uni_bss_color) + \ + sizeof(struct tlv)) + +#define MT_CONNAC3_SKU_POWER_LIMIT 449 +struct mt7925_sku_tlv { + u8 channel; + s8 pwr_limit[MT_CONNAC3_SKU_POWER_LIMIT]; +} __packed; + +struct mt7925_tx_power_limit_tlv { + u8 rsv[4]; + + __le16 tag; + __le16 len; + + /* DW0 - common info*/ + u8 ver; + u8 pad0; + __le16 rsv1; + /* DW1 - cmd hint */ + u8 n_chan; /* # channel */ + u8 band; /* 2.4GHz - 5GHz - 6GHz */ + u8 last_msg; + u8 limit_type; + /* DW3 */ + u8 alpha2[4]; /* regulatory_request.alpha2 */ + u8 pad2[32]; + + u8 data[]; +} __packed; + +struct mt7925_arpns_tlv { + __le16 tag; + __le16 len; + + u8 enable; + u8 ips_num; + u8 rsv[2]; +} __packed; + +struct mt7925_wow_pattern_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 index; /* pattern index */ + u8 enable; /* 0: disable + * 1: enable + */ + u8 data_len; /* pattern length */ + u8 offset; + u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN]; + u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN]; + u8 rsv[4]; +} __packed; + +int mt7925_mcu_set_dbdc(struct mt76_phy *phy); +int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req); +int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, + struct ieee80211_vif *vif); +int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq); +int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, + struct ieee80211_vif *vif, + bool enable); +int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + int enable); +int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); +int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); +int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_chanctx_conf *ctx); +int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy); +int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info); +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h new file mode 100644 index 0000000000..33785f526a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_H +#define __MT7925_H + +#include "../mt792x.h" +#include "regs.h" + +#define MT7925_BEACON_RATES_TBL 25 + +#define MT7925_TX_RING_SIZE 2048 +#define MT7925_TX_MCU_RING_SIZE 256 +#define MT7925_TX_FWDL_RING_SIZE 128 + +#define MT7925_RX_RING_SIZE 1536 +#define MT7925_RX_MCU_RING_SIZE 512 + +#define MT7925_EEPROM_SIZE 3584 +#define MT7925_TOKEN_SIZE 8192 + +#define MT7925_EEPROM_BLOCK_SIZE 16 + +#define MT7925_SKU_RATE_NUM 161 +#define MT7925_SKU_MAX_DELTA_IDX MT7925_SKU_RATE_NUM +#define MT7925_SKU_TABLE_SIZE (MT7925_SKU_RATE_NUM + 1) + +#define MCU_UNI_EVENT_ROC 0x27 + +enum { + UNI_ROC_ACQUIRE, + UNI_ROC_ABORT, + UNI_ROC_NUM +}; + +enum mt7925_roc_req { + MT7925_ROC_REQ_JOIN, + MT7925_ROC_REQ_ROC, + MT7925_ROC_REQ_NUM +}; + +enum { + UNI_EVENT_ROC_GRANT = 0, + UNI_EVENT_ROC_TAG_NUM +}; + +struct mt7925_roc_grant_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 status; + u8 primarychannel; + u8 rfsco; + u8 rfband; + u8 channelwidth; + u8 centerfreqseg1; + u8 centerfreqseg2; + u8 reqtype; + u8 dbdcband; + u8 rsv[1]; + __le32 max_interval; +} __packed; + +struct mt7925_beacon_loss_tlv { + __le16 tag; + __le16 len; + u8 reason; + u8 nr_btolink; + u8 pad[2]; +} __packed; + +struct mt7925_uni_beacon_loss_event { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_beacon_loss_tlv beacon_loss; +} __packed; + +#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) +#define to_rcpi(rssi) (2 * (rssi) + 220) + +enum mt7925_txq_id { + MT7925_TXQ_BAND0, + MT7925_TXQ_BAND1, + MT7925_TXQ_MCU_WM = 15, + MT7925_TXQ_FWDL, +}; + +enum mt7925_rxq_id { + MT7925_RXQ_BAND0 = 2, + MT7925_RXQ_BAND1, + MT7925_RXQ_MCU_WM = 0, + MT7925_RXQ_MCU_WM2, /* for tx done */ +}; + +enum { + MODE_OPEN = 0, + MODE_SHARED = 1, + MODE_WPA = 3, + MODE_WPA_PSK = 4, + MODE_WPA_NONE = 5, + MODE_WPA2 = 6, + MODE_WPA2_PSK = 7, + MODE_WPA3_SAE = 11, +}; + +enum { + MT7925_CLC_POWER, + MT7925_CLC_CHAN, + MT7925_CLC_MAX_NUM, +}; + +struct mt7925_clc_rule { + u8 alpha2[2]; + u8 type[2]; + u8 seg_idx; + u8 rsv[3]; +} __packed; + +struct mt7925_clc_segment { + u8 idx; + u8 rsv1[3]; + u32 offset; + u32 len; + u8 rsv2[4]; +} __packed; + +struct mt7925_clc { + __le32 len; + u8 idx; + u8 ver; + u8 nr_country; + u8 type; + u8 nr_seg; + u8 rsv[7]; + u8 data[]; +} __packed; + +enum mt7925_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + __MT_EE_MAX = 0x9ff +}; + +enum { + TXPWR_USER, + TXPWR_EEPROM, + TXPWR_MAC, + TXPWR_MAX_NUM, +}; + +struct mt7925_txpwr { + s8 cck[4][2]; + s8 ofdm[8][2]; + s8 ht20[8][2]; + s8 ht40[9][2]; + s8 vht20[12][2]; + s8 vht40[12][2]; + s8 vht80[12][2]; + s8 vht160[12][2]; + s8 he26[12][2]; + s8 he52[12][2]; + s8 he106[12][2]; + s8 he242[12][2]; + s8 he484[12][2]; + s8 he996[12][2]; + s8 he996x2[12][2]; + s8 eht26[16][2]; + s8 eht52[16][2]; + s8 eht106[16][2]; + s8 eht242[16][2]; + s8 eht484[16][2]; + s8 eht996[16][2]; + s8 eht996x2[16][2]; + s8 eht996x4[16][2]; + s8 eht26_52[16][2]; + s8 eht26_106[16][2]; + s8 eht484_242[16][2]; + s8 eht996_484[16][2]; + s8 eht996_484_242[16][2]; + s8 eht996x2_484[16][2]; + s8 eht996x3[16][2]; + s8 eht996x3_484[16][2]; +}; + +extern const struct ieee80211_ops mt7925_ops; + +int __mt7925_start(struct mt792x_phy *phy); +int mt7925_register_device(struct mt792x_dev *dev); +void mt7925_unregister_device(struct mt792x_dev *dev); +int mt7925_run_firmware(struct mt792x_dev *dev); +int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable); +int 
mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable, + enum mt76_sta_info_state state); +int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag); +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_set_eeprom(struct mt792x_dev *dev); +int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct rate_info *rate); +int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl); +void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb); +int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd); +int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, + u8 bit_op, u32 bit_map); + +int mt7925_mac_init(struct mt792x_dev *dev); +int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); +void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7925_mac_reset_work(struct work_struct *work); +int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); + +void mt7925_tx_token_put(struct mt792x_dev *dev); +bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len); +void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb, u32 *info); +void mt7925_stats_work(struct work_struct *work); +void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy); +int mt7925_init_debugfs(struct mt792x_dev *dev); + +int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + bool enable); +int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); +int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); +void mt7925_scan_work(struct work_struct *work); +void mt7925_roc_work(struct work_struct *work); +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif); +void mt7925_coredump_work(struct work_struct *work); +int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, + struct mt7925_txpwr *txpwr); +void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev, + u8 tbl_idx, u16 rate_idx); +void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, int pid, + enum mt76_txq_id qid, u32 changed); +void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, bool clear_status, + struct list_head *free_list); +int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); + +int mt7925e_mac_reset(struct mt792x_dev *dev); +int mt7925e_mcu_init(struct mt792x_dev *dev); +void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data); +void mt7925_set_runtime_pm(struct mt792x_dev *dev); +void mt7925_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +void mt7925_connac_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +void mt7925_set_ipv6_ns_work(struct work_struct *work); + +int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable); +int 
mt7925_mcu_config_sniffer(struct mt792x_vif *vif, + struct ieee80211_chanctx_conf *ctx); + +int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e); +bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); + +int mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable); +int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); + +int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set); +int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap); +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + struct ieee80211_channel *chan, int duration, + enum mt7925_roc_req type, u8 token_id); +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + u8 token_id); +int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq); +int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct ieee80211_key_conf *key, int mcu_cmd, + struct mt76_wcid *wcid, enum set_key_cmd cmd); +int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val); +int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c new file mode 100644 index 0000000000..08ef75e24e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt7925.h" +#include "mac.h" +#include "mcu.h" +#include "../dma.h" + +static const struct pci_device_id mt7925_pci_device_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7925), + .driver_data = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0717), + .driver_data = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { }, +}; + +static bool mt7925_disable_aspm; +module_param_named(disable_aspm, mt7925_disable_aspm, bool, 0644); +MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support"); + +static int mt7925e_init_reset(struct mt792x_dev *dev) +{ + return mt792x_wpdma_reset(dev, true); +} + +static void mt7925e_unregister_device(struct mt792x_dev *dev) +{ + int i; + struct mt76_connac_pm *pm = &dev->pm; + + cancel_work_sync(&dev->init_work); + mt76_unregister_device(&dev->mt76); + mt76_for_each_q_rx(&dev->mt76, i) + napi_disable(&dev->mt76.napi[i]); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + cancel_work_sync(&dev->reset_work); + + mt7925_tx_token_put(dev); + __mt792x_mcu_drv_pmctrl(dev); + mt792x_dma_cleanup(dev); + mt792x_wfsys_reset(dev); + skb_queue_purge(&dev->mt76.mcu.res_q); + + tasklet_disable(&dev->mt76.irq_tasklet); +} + +static void mt7925_reg_remap_restore(struct mt792x_dev *dev) +{ + /* remap to ori status */ + if (unlikely(dev->backup_l1)) { + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->backup_l1); + dev->backup_l1 = 0; + } + + if (dev->backup_l2) { + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->backup_l2); + dev->backup_l2 = 0; + } +} + +static u32 mt7925_reg_map_l1(struct mt792x_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + + dev->backup_l1 = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, + MT_HIF_REMAP_L1_MASK, + FIELD_PREP(MT_HIF_REMAP_L1_MASK, base)); + + /* use read to push write */ + dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1 + offset; +} + +static u32 mt7925_reg_map_l2(struct mt792x_dev *dev, u32 addr) +{ + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, MT_HIF_REMAP_BASE_L2); + + dev->backup_l2 = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, + MT_HIF_REMAP_L1_MASK, + FIELD_PREP(MT_HIF_REMAP_L1_MASK, base)); + + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, addr); + /* use read to push write */ + dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1; +} + +static u32 __mt7925_reg_addr(struct mt792x_dev *dev, u32 addr) +{ + static const struct mt76_connac_reg_map fixed_map[] = { + { 0x830c0000, 0x000000, 0x0001000 }, /* WF_MCU_BUS_CR_REMAP */ + { 0x54000000, 0x002000, 0x0001000 }, /* WFDMA PCIE0 MCU DMA0 */ + { 0x55000000, 0x003000, 0x0001000 }, /* WFDMA PCIE0 MCU DMA1 */ + { 0x56000000, 0x004000, 0x0001000 }, /* WFDMA reserved */ + { 0x57000000, 0x005000, 0x0001000 }, /* WFDMA MCU wrap CR */ + { 0x58000000, 0x006000, 0x0001000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ + { 0x59000000, 0x007000, 0x0001000 }, /* WFDMA PCIE1 MCU DMA1 */ + { 0x820c0000, 0x008000, 0x0004000 }, /* WF_UMAC_TOP (PLE) */ + { 0x820c8000, 0x00c000, 0x0002000 }, /* WF_UMAC_TOP (PSE) */ + { 0x820cc000, 0x00e000, 0x0002000 }, /* WF_UMAC_TOP (PP) */ + { 0x74030000, 0x010000, 0x0001000 }, /* PCIe MAC */ + { 0x820e0000, 0x020000, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ + { 0x820e1000, 0x020400, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ + { 
0x820e2000, 0x020800, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ + { 0x820e3000, 0x020c00, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ + { 0x820e4000, 0x021000, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + { 0x820e5000, 0x021400, 0x0000800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + { 0x820ce000, 0x021c00, 0x0000200 }, /* WF_LMAC_TOP (WF_SEC) */ + { 0x820e7000, 0x021e00, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ + { 0x820cf000, 0x022000, 0x0001000 }, /* WF_LMAC_TOP (WF_PF) */ + { 0x820e9000, 0x023400, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ + { 0x820ea000, 0x024000, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ + { 0x820eb000, 0x024200, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ + { 0x820ec000, 0x024600, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ + { 0x820ed000, 0x024800, 0x0000800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ + { 0x820ca000, 0x026000, 0x0002000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */ + { 0x820d0000, 0x030000, 0x0010000 }, /* WF_LMAC_TOP (WF_WTBLON) */ + { 0x40000000, 0x070000, 0x0010000 }, /* WF_UMAC_SYSRAM */ + { 0x00400000, 0x080000, 0x0010000 }, /* WF_MCU_SYSRAM */ + { 0x00410000, 0x090000, 0x0010000 }, /* WF_MCU_SYSRAM (configure register) */ + { 0x820f0000, 0x0a0000, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ + { 0x820f1000, 0x0a0600, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ + { 0x820f2000, 0x0a0800, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ + { 0x820f3000, 0x0a0c00, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ + { 0x820f4000, 0x0a1000, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ + { 0x820f5000, 0x0a1400, 0x0000800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ + { 0x820f7000, 0x0a1e00, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ + { 0x820f9000, 0x0a3400, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ + { 0x820fa000, 0x0a4000, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ + { 0x820fb000, 0x0a4200, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ + { 0x820fc000, 0x0a4600, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ + { 0x820fd000, 0x0a4800, 0x0000800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ + { 0x820c4000, 0x0a8000, 0x0004000 }, /* WF_LMAC_TOP BN1 (WF_MUCOP) */ + { 0x820b0000, 0x0ae000, 0x0001000 }, /* [APB2] WFSYS_ON */ + { 0x80020000, 0x0b0000, 0x0010000 }, /* WF_TOP_MISC_OFF */ + { 0x81020000, 0x0c0000, 0x0010000 }, /* WF_TOP_MISC_ON */ + { 0x7c020000, 0x0d0000, 0x0010000 }, /* CONN_INFRA, wfdma */ + { 0x7c060000, 0x0e0000, 0x0010000 }, /* CONN_INFRA, conn_host_csr_top */ + { 0x7c000000, 0x0f0000, 0x0010000 }, /* CONN_INFRA */ + { 0x70020000, 0x1f0000, 0x0010000 }, /* Reserved for CBTOP, can't switch */ + { 0x7c500000, 0x060000, 0x2000000 }, /* remap */ + { 0x0, 0x0, 0x0 } /* End */ + }; + int i; + + if (addr < 0x200000) + return addr; + + mt7925_reg_remap_restore(dev); + + for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { + u32 ofs; + + if (addr < fixed_map[i].phys) + continue; + + ofs = addr - fixed_map[i].phys; + if (ofs > fixed_map[i].size) + continue; + + return fixed_map[i].maps + ofs; + } + + if ((addr >= 0x18000000 && addr < 0x18c00000) || + (addr >= 0x70000000 && addr < 0x78000000) || + (addr >= 0x7c000000 && addr < 0x7c400000)) + return mt7925_reg_map_l1(dev, addr); + + return mt7925_reg_map_l2(dev, addr); +} + +static u32 mt7925_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 addr = __mt7925_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7925_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 addr = 
__mt7925_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7925_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 addr = __mt7925_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +static int mt7925_dma_init(struct mt792x_dev *dev) +{ + int ret; + + mt76_dma_attach(&dev->mt76); + + ret = mt792x_dma_disable(dev, true); + if (ret) + return ret; + + /* init tx queue */ + ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0, + MT7925_TX_RING_SIZE, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4); + + /* command to WM */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7925_TXQ_MCU_WM, + MT7925_TX_MCU_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* firmware download */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7925_TXQ_FWDL, + MT7925_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* rx event */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], + MT7925_RXQ_MCU_WM, MT7925_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE); + if (ret) + return ret; + + /* rx data */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], + MT7925_RXQ_BAND0, MT7925_RX_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE); + if (ret) + return ret; + + ret = mt76_init_queues(dev, mt792x_poll_rx); + if (ret < 0) + return ret; + + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt792x_poll_tx); + napi_enable(&dev->mt76.tx_napi); + + return mt792x_dma_enable(dev); +} + +static int mt7925_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + /* txwi_size = txd size + txp size */ + .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp), + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ | + MT_DRV_AMSDU_OFFLOAD, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7925_TOKEN_SIZE, + .tx_prepare_skb = mt7925e_tx_prepare_skb, + .tx_complete_skb = mt76_connac_tx_complete_skb, + .rx_check = mt7925_rx_check, + .rx_skb = mt7925_queue_rx_skb, + .rx_poll_complete = mt792x_rx_poll_complete, + .sta_add = mt7925_mac_sta_add, + .sta_assoc = mt7925_mac_sta_assoc, + .sta_remove = mt7925_mac_sta_remove, + .update_survey = mt792x_update_channel, + }; + static const struct mt792x_hif_ops mt7925_pcie_ops = { + .init_reset = mt7925e_init_reset, + .reset = mt7925e_mac_reset, + .mcu_init = mt7925e_mcu_init, + .drv_own = mt792xe_mcu_drv_pmctrl, + .fw_own = mt792xe_mcu_fw_pmctrl, + }; + static const struct mt792x_irq_map irq_map = { + .host_irq_enable = MT_WFDMA0_HOST_INT_ENA, + .tx = { + .all_complete_mask = MT_INT_TX_DONE_ALL, + .mcu_complete_mask = MT_INT_TX_DONE_MCU, + }, + .rx = { + .data_complete_mask = HOST_RX_DONE_INT_ENA2, + .wm_complete_mask = HOST_RX_DONE_INT_ENA0, + }, + }; + struct ieee80211_ops *ops; + struct mt76_bus_ops *bus_ops; + struct mt792x_dev *dev; + struct mt76_dev *mdev; + u8 features; + int ret; + u16 cmd; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) { + cmd |= PCI_COMMAND_MEMORY; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, 
PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto err_free_pci_vec; + + if (mt7925_disable_aspm) + mt76_pci_disable_aspm(pdev); + + ops = mt792x_get_mac80211_ops(&pdev->dev, &mt7925_ops, + (void *)id->driver_data, &features); + if (!ops) { + ret = -ENOMEM; + goto err_free_pci_vec; + } + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) { + ret = -ENOMEM; + goto err_free_pci_vec; + } + + pci_set_drvdata(pdev, mdev); + + dev = container_of(mdev, struct mt792x_dev, mt76); + dev->fw_features = features; + dev->hif_ops = &mt7925_pcie_ops; + dev->irq_map = &irq_map; + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + tasklet_init(&mdev->irq_tasklet, mt792x_irq_tasklet, (unsigned long)dev); + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) { + ret = -ENOMEM; + goto err_free_dev; + } + + bus_ops->rr = mt7925_rr; + bus_ops->wr = mt7925_wr; + bus_ops->rmw = mt7925_rmw; + dev->mt76.bus = bus_ops; + + ret = __mt792x_mcu_fw_pmctrl(dev); + if (ret) + goto err_free_dev; + + ret = __mt792xe_mcu_drv_pmctrl(dev); + if (ret) + goto err_free_dev; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + ret = mt792x_wfsys_reset(dev); + if (ret) + goto err_free_dev; + + mt76_wr(dev, irq_map.host_irq_enable, 0); + + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + ret = devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto err_free_dev; + + ret = mt7925_dma_init(dev); + if (ret) + goto err_free_irq; + + ret = mt7925_register_device(dev); + if (ret) + goto err_free_irq; + + return 0; + +err_free_irq: + devm_free_irq(&pdev->dev, pdev->irq, dev); +err_free_dev: + mt76_free_device(&dev->mt76); +err_free_pci_vec: + pci_free_irq_vectors(pdev); + + return ret; +} + +static void mt7925_pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + + mt7925e_unregister_device(dev); + devm_free_irq(&pdev->dev, pdev->irq, dev); + mt76_free_device(&dev->mt76); + pci_free_irq_vectors(pdev); +} + +static int mt7925_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt76_connac_pm *pm = &dev->pm; + int i, err; + + pm->suspended = true; + flush_work(&dev->reset_work); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + err = mt792x_mcu_drv_pmctrl(dev); + if (err < 0) + goto restore_suspend; + + /* always enable deep sleep during suspend to reduce + * power consumption + */ + mt7925_mcu_set_deep_sleep(dev, true); + + err = mt76_connac_mcu_set_hif_suspend(mdev, true); + if (err) + goto restore_suspend; + + napi_disable(&mdev->tx_napi); + mt76_worker_disable(&mdev->tx_worker); + + mt76_for_each_q_rx(mdev, i) { + napi_disable(&mdev->napi[i]); + } + + /* wait until dma is idle */ + mt76_poll(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); + + /* put dma disabled */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + 
+ /* disable interrupt */ + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + synchronize_irq(pdev->irq); + tasklet_kill(&mdev->irq_tasklet); + + err = mt792x_mcu_fw_pmctrl(dev); + if (err) + goto restore_napi; + + return 0; + +restore_napi: + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + + if (!pm->ds_enable) + mt7925_mcu_set_deep_sleep(dev, false); + + mt76_connac_mcu_set_hif_suspend(mdev, false); + +restore_suspend: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static int mt7925_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt76_connac_pm *pm = &dev->pm; + int i, err; + + err = mt792x_mcu_drv_pmctrl(dev); + if (err < 0) + goto failed; + + mt792x_wpdma_reinit_cond(dev); + + /* enable interrupt */ + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_connac_irq_enable(&dev->mt76, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); + + /* put dma enabled */ + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + mt76_worker_enable(&mdev->tx_worker); + + local_bh_disable(); + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + local_bh_enable(); + + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + + /* restore previous ds setting */ + if (!pm->ds_enable) + mt7925_mcu_set_deep_sleep(dev, false); + +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static void mt7925_pci_shutdown(struct pci_dev *pdev) +{ + mt7925_pci_remove(pdev); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(mt7925_pm_ops, mt7925_pci_suspend, mt7925_pci_resume); + +static struct pci_driver mt7925_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7925_pci_device_table, + .probe = mt7925_pci_probe, + .remove = mt7925_pci_remove, + .shutdown = mt7925_pci_shutdown, + .driver.pm = pm_sleep_ptr(&mt7925_pm_ops), +}; + +module_pci_driver(mt7925_pci_driver); + +MODULE_DEVICE_TABLE(pci, mt7925_pci_device_table); +MODULE_FIRMWARE(MT7925_FIRMWARE_WM); +MODULE_FIRMWARE(MT7925_ROM_PATCH); +MODULE_AUTHOR("Deren Wu "); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c new file mode 100644 index 0000000000..9fca887977 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include "mt7925.h" +#include "../dma.h" +#include "mac.h" + +int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct mt76_connac_hw_txp *txp; + struct mt76_txwi_cache *t; + int id, pid; + u8 *txwi = (u8 *)txwi_ptr; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + + if (sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + mt7925_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, key, + pid, qid, 0); + + txp = (struct mt76_connac_hw_txp *)(txwi + MT_TXD_SIZE); + memset(txp, 0, sizeof(struct mt76_connac_hw_txp)); + mt76_connac_write_hw_txp(mdev, tx_info, txp, id); + + tx_info->skb = NULL; + + return 0; +} + +void mt7925_tx_token_put(struct mt792x_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { + mt7925_txwi_free(dev, txwi, NULL, false, NULL); + dev->mt76.token_count--; + } + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); +} + +int mt7925e_mac_reset(struct mt792x_dev *dev) +{ + const struct mt792x_irq_map *irq_map = dev->irq_map; + int i, err; + + mt792xe_mcu_drv_pmctrl(dev); + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76_txq_schedule_all(&dev->mphy); + + mt76_worker_disable(&dev->mt76.tx_worker); + if (irq_map->rx.data_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); + if (irq_map->rx.wm_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); + if (irq_map->rx.wm2_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); + if (irq_map->tx.all_complete_mask) + napi_disable(&dev->mt76.tx_napi); + + mt7925_tx_token_put(dev); + idr_init(&dev->mt76.token); + + mt792x_wpdma_reset(dev, true); + + local_bh_disable(); + mt76_for_each_q_rx(&dev->mt76, i) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + local_bh_enable(); + + dev->fw_assert = false; + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + + mt76_wr(dev, dev->irq_map->host_irq_enable, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + err = mt792xe_mcu_fw_pmctrl(dev); + if (err) + return err; + + err = __mt792xe_mcu_drv_pmctrl(dev); + if (err) + goto out; + + err = mt7925_run_firmware(dev); + if (err) + goto out; + + err = mt7925_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7925_mac_init(dev); + if (err) + goto out; + + err = __mt7925_start(&dev->phy); +out: + clear_bit(MT76_RESET, 
&dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c new file mode 100644 index 0000000000..f95bc5dcd8 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include "mt7925.h" +#include "mcu.h" + +static int +mt7925_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + enum mt76_mcuq_id txq = MT_MCUQ_WM; + int ret; + + ret = mt7925_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + mdev->mcu.timeout = 3 * HZ; + + if (cmd == MCU_CMD(FW_SCATTER)) + txq = MT_MCUQ_FWDL; + + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); +} + +int mt7925e_mcu_init(struct mt792x_dev *dev) +{ + static const struct mt76_mcu_ops mt7925_mcu_ops = { + .headroom = sizeof(struct mt76_connac2_mcu_txd), + .mcu_skb_send_msg = mt7925_mcu_send_message, + .mcu_parse_response = mt7925_mcu_parse_response, + }; + int err; + + dev->mt76.mcu_ops = &mt7925_mcu_ops; + + err = mt792xe_mcu_fw_pmctrl(dev); + if (err) + return err; + + err = __mt792xe_mcu_drv_pmctrl(dev); + if (err) + return err; + + mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1); + + err = mt7925_run_firmware(dev); + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h new file mode 100644 index 0000000000..985794a40c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_REGS_H +#define __MT7925_REGS_H + +#include "../mt792x_regs.h" + +#define MT_MDP_BASE 0x820cc800 +#define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) + +#define MT_MDP_DCR0 MT_MDP(0x000) +#define MT_MDP_DCR0_DAMSDU_EN BIT(15) +#define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19) + +#define MT_MDP_DCR1 MT_MDP(0x004) +#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) + +#define MT_MDP_BNRCFR0(_band) MT_MDP(0x090 + ((_band) << 8)) +#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4) +#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6) +#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8) + +#define MT_MDP_BNRCFR1(_band) MT_MDP(0x094 + ((_band) << 8)) +#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22) +#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27) +#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29) +#define MT_MDP_TO_HIF 0 +#define MT_MDP_TO_WM 1 + +#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228) +#define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c) +#define HOST_RX_DONE_INT_ENA4 BIT(12) +#define HOST_RX_DONE_INT_ENA5 BIT(13) +#define HOST_RX_DONE_INT_ENA6 BIT(14) +#define HOST_RX_DONE_INT_ENA7 BIT(15) +#define HOST_RX_DONE_INT_ENA8 BIT(16) +#define HOST_RX_DONE_INT_ENA9 BIT(17) +#define HOST_RX_DONE_INT_ENA10 BIT(18) +#define HOST_RX_DONE_INT_ENA11 BIT(19) +#define HOST_TX_DONE_INT_ENA15 BIT(25) +#define HOST_TX_DONE_INT_ENA16 BIT(26) +#define HOST_TX_DONE_INT_ENA17 BIT(27) + +/* WFDMA interrupt */ +#define MT_INT_RX_DONE_DATA HOST_RX_DONE_INT_ENA2 +#define MT_INT_RX_DONE_WM HOST_RX_DONE_INT_ENA0 +#define MT_INT_RX_DONE_WM2 HOST_RX_DONE_INT_ENA1 +#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_DATA | \ + MT_INT_RX_DONE_WM | \ + MT_INT_RX_DONE_WM2) + +#define MT_INT_TX_DONE_MCU_WM (HOST_TX_DONE_INT_ENA15 | \ + HOST_TX_DONE_INT_ENA17) + +#define MT_INT_TX_DONE_FWDL HOST_TX_DONE_INT_ENA16 +#define MT_INT_TX_DONE_BAND0 HOST_TX_DONE_INT_ENA0 + +#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ + MT_INT_TX_DONE_FWDL) +#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \ + MT_INT_TX_DONE_BAND0 | \ + GENMASK(18, 4)) + +#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500) + +#define MT_INFRA_CFG_BASE 0xd1000 +#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) + +#define MT_HIF_REMAP_L1 0x155024 +#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16) +#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) +#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) +#define MT_HIF_REMAP_BASE_L1 0x130000 + +#define MT_HIF_REMAP_L2 0x0120 +#if IS_ENABLED(CONFIG_MT76_DEV) +#define MT_HIF_REMAP_BASE_L2 (0x7c500000 - (0x7c000000 - 0x18000000)) +#else +#define MT_HIF_REMAP_BASE_L2 0x18500000 +#endif + +#define MT_WFSYS_SW_RST_B 0x7c000140 + +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370) +#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0) + +#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c new file mode 100644 index 0000000000..9b885c5b3e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +static const struct usb_device_id mt7925u_device_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { }, +}; + +static int +mt7925u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 pad, ep; + int ret; + + ret = mt7925_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + mdev->mcu.timeout = 3 * HZ; + + if (cmd != MCU_CMD(FW_SCATTER)) + ep = MT_EP_OUT_INBAND_CMD; + else + ep = MT_EP_OUT_AC_BE; + + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); + pad = round_up(skb->len, 4) + 4 - skb->len; + __skb_put_zero(skb, pad); + + ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, + 1000, ep); + dev_kfree_skb(skb); + + return ret; +} + +static int mt7925u_mcu_init(struct mt792x_dev *dev) +{ + static const struct mt76_mcu_ops mcu_ops = { + .headroom = MT_SDIO_HDR_SIZE + + sizeof(struct mt76_connac2_mcu_txd), + .tailroom = MT_USB_TAIL_SIZE, + .mcu_skb_send_msg = mt7925u_mcu_send_message, + .mcu_parse_response = mt7925_mcu_parse_response, + }; + int ret; + + dev->mt76.mcu_ops = &mcu_ops; + + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + ret = mt7925_run_firmware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + return 0; +} + +static int mt7925u_mac_reset(struct mt792x_dev *dev) +{ + int err; + + mt76_txq_schedule_all(&dev->mphy); + mt76_worker_disable(&dev->mt76.tx_worker); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + mt792xu_wfsys_reset(dev); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + err = mt76u_resume_rx(&dev->mt76); + if (err) + goto out; + + err = mt792xu_mcu_power_on(dev); + if (err) + goto out; + + err = mt792xu_dma_init(dev, false); + if (err) + goto out; + + mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7925_run_firmware(dev); + if (err) + goto out; + + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7925_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7925_mac_init(dev); + if (err) + goto out; + + err = __mt7925_start(&dev->phy); +out: + clear_bit(MT76_RESET, &dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} + +static int mt7925u_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = MT_SDIO_TXD_SIZE, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ | + MT_DRV_AMSDU_OFFLOAD, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .tx_prepare_skb = mt7925_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7925_usb_sdio_tx_complete_skb, + .tx_status_data = mt7925_usb_sdio_tx_status_data, + .rx_skb = mt7925_queue_rx_skb, + .rx_check = mt7925_rx_check, + .sta_add = mt7925_mac_sta_add, + .sta_assoc = mt7925_mac_sta_assoc, + .sta_remove = mt7925_mac_sta_remove, + .update_survey = mt792x_update_channel, + }; + static const struct mt792x_hif_ops hif_ops = { + .mcu_init = mt7925u_mcu_init, + .init_reset = mt792xu_init_reset, + .reset = mt7925u_mac_reset, + }; + static struct 
mt76_bus_ops bus_ops = { + .rr = mt792xu_rr, + .wr = mt792xu_wr, + .rmw = mt792xu_rmw, + .read_copy = mt76u_read_copy, + .write_copy = mt792xu_copy, + .type = MT76_BUS_USB, + }; + struct usb_device *udev = interface_to_usbdev(usb_intf); + struct ieee80211_ops *ops; + struct ieee80211_hw *hw; + struct mt792x_dev *dev; + struct mt76_dev *mdev; + u8 features; + int ret; + + ops = mt792x_get_mac80211_ops(&usb_intf->dev, &mt7925_ops, + (void *)id->driver_info, &features); + if (!ops) + return -ENOMEM; + + ops->stop = mt792xu_stop; + + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt792x_dev, mt76); + dev->fw_features = features; + dev->hif_ops = &hif_ops; + + udev = usb_get_dev(udev); + usb_reset_device(udev); + + usb_set_intfdata(usb_intf, dev); + + ret = __mt76u_init(mdev, usb_intf, &bus_ops); + if (ret < 0) + goto error; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) { + ret = mt792xu_wfsys_reset(dev); + if (ret) + goto error; + } + + ret = mt792xu_mcu_power_on(dev); + if (ret) + goto error; + + ret = mt76u_alloc_mcu_queue(&dev->mt76); + if (ret) + goto error; + + ret = mt76u_alloc_queues(&dev->mt76); + if (ret) + goto error; + + ret = mt792xu_dma_init(dev, false); + if (ret) + goto error; + + hw = mt76_hw(dev); + /* check hw sg support in order to enable AMSDU */ + hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; + + ret = mt7925_register_device(dev); + if (ret) + goto error; + + return 0; + +error: + mt76u_queues_deinit(&dev->mt76); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + mt76_free_device(&dev->mt76); + + return ret; +} + +#ifdef CONFIG_PM +static int mt7925u_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct mt792x_dev *dev = usb_get_intfdata(intf); + struct mt76_connac_pm *pm = &dev->pm; + int err; + + pm->suspended = true; + flush_work(&dev->reset_work); + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + if (err) + goto failed; + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + return 0; + +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static int mt7925u_resume(struct usb_interface *intf) +{ + struct mt792x_dev *dev = usb_get_intfdata(intf); + struct mt76_connac_pm *pm = &dev->pm; + bool reinit = true; + int err, i; + + for (i = 0; i < 10; i++) { + u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT); + + if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) { + reinit = false; + break; + } + if (val & MT_WF_SW_SER_DONE_SUSPEND) { + mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0); + break; + } + + msleep(20); + } + + if (reinit || mt792x_dma_need_reinit(dev)) { + err = mt792xu_dma_init(dev, true); + if (err) + goto failed; + } + + err = mt76u_resume_rx(&dev->mt76); + if (err < 0) + goto failed; + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} +#endif /* CONFIG_PM */ + +MODULE_DEVICE_TABLE(usb, mt7925u_device_table); +MODULE_FIRMWARE(MT7925_FIRMWARE_WM); +MODULE_FIRMWARE(MT7925_ROM_PATCH); + +static struct usb_driver mt7925u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7925u_device_table, + .probe = mt7925u_probe, + .disconnect = mt792xu_disconnect, +#ifdef 
CONFIG_PM + .suspend = mt7925u_suspend, + .resume = mt7925u_resume, + .reset_resume = mt7925u_resume, +#endif /* CONFIG_PM */ + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; +module_usb_driver(mt7925u_driver); + +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index 6c347495e1..36fae736dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -25,6 +25,8 @@ #define MT792x_FW_TAG_FEATURE 4 #define MT792x_FW_CAP_CNM BIT(7) +#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0) + /* NOTE: used to map mt76_rates. idx may change if firmware expands table */ #define MT792x_BASIC_RATES_TBL 11 @@ -36,9 +38,14 @@ #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" #define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" +#define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin" #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" #define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" +#define MT7925_ROM_PATCH "mediatek/mt7925/WIFI_MT7925_PATCH_MCU_1_1_hdr.bin" + +#define MT792x_SDIO_HDR_TX_BYTES GENMASK(15, 0) +#define MT792x_SDIO_HDR_PKT_TYPE GENMASK(17, 16) struct mt792x_vif; struct mt792x_sta; @@ -61,6 +68,14 @@ enum { MT792x_CLC_MAX_NUM, }; +enum mt792x_reg_power_type { + MT_AP_UNSET = 0, + MT_AP_DEFAULT, + MT_AP_LPI, + MT_AP_SP, + MT_AP_VLP, +}; + DECLARE_EWMA(avg_signal, 10, 8) struct mt792x_sta { @@ -112,6 +127,8 @@ struct mt792x_phy { struct mt76_mib_stats mib; u8 sta_work_count; + u8 clc_chan_conf; + enum mt792x_reg_power_type power_type; struct sk_buff_head scan_event_list; struct delayed_work scan_work; @@ -119,6 +136,7 @@ struct mt792x_phy { void *acpisar; #endif void *clc[MT792x_CLC_MAX_NUM]; + u64 chip_cap; struct work_struct roc_work; struct timer_list roc_timer; @@ -228,6 +246,7 @@ static inline bool mt792x_dma_need_reinit(struct mt792x_dev *dev) #define mt792x_mutex_release(dev) \ mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) +void mt792x_stop(struct ieee80211_hw *hw); void mt792x_pm_wake_work(struct work_struct *work); void mt792x_pm_power_save_work(struct work_struct *work); void mt792x_reset(struct mt76_dev *mdev); @@ -307,6 +326,8 @@ static inline char *mt792x_ram_name(struct mt792x_dev *dev) switch (mt76_chip(&dev->mt76)) { case 0x7922: return MT7922_FIRMWARE_WM; + case 0x7925: + return MT7925_FIRMWARE_WM; default: return MT7921_FIRMWARE_WM; } @@ -317,6 +338,8 @@ static inline char *mt792x_patch_name(struct mt792x_dev *dev) switch (mt76_chip(&dev->mt76)) { case 0x7922: return MT7922_ROM_PATCH; + case 0x7925: + return MT7925_ROM_PATCH; default: return MT7921_ROM_PATCH; } @@ -336,6 +359,20 @@ void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val); u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val); void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len); void mt792xu_disconnect(struct usb_interface *usb_intf); +void mt792xu_stop(struct ieee80211_hw *hw); + +static inline void +mt792x_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb, + int type) +{ + u32 hdr, len; + + len = mt76_is_usb(&dev->mt76) ? 
skb->len : skb->len + sizeof(hdr); + hdr = FIELD_PREP(MT792x_SDIO_HDR_TX_BYTES, len) | + FIELD_PREP(MT792x_SDIO_HDR_PKT_TYPE, type); + + put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr))); +} int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev); int mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index f111c47fdc..502be22dbe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -91,6 +91,28 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, } EXPORT_SYMBOL_GPL(mt792x_tx); +void mt792x_stop(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + cancel_delayed_work_sync(&phy->mt76->mac_work); + + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); + cancel_work_sync(&dev->reset_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + if (is_mt7921(&dev->mt76)) { + mt792x_mutex_acquire(dev); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); + mt792x_mutex_release(dev); + } + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); +} +EXPORT_SYMBOL_GPL(mt792x_stop); + void mt792x_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -115,7 +137,7 @@ void mt792x_remove_interface(struct ieee80211_hw *hw, list_del_init(&msta->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &msta->wcid); + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } EXPORT_SYMBOL_GPL(mt792x_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c index a3dbd3865b..488326ce5e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c @@ -88,25 +88,44 @@ EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete); #define PREFETCH(base, depth) ((base) << 16 | (depth)) static void mt792x_dma_prefetch(struct mt792x_dev *dev) { - mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4)); - - mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4)); + if (is_mt7925(&dev->mt76)) { + /* rx ring */ + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4)); + /* tx ring */ + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10)); + mt76_wr(dev, 
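
Note on the mt792x_skb_add_usb_sdio_hdr() helper introduced above: it prepends a 32-bit little-endian word carrying the frame length in bits 15:0 and the packet type in bits 17:16, with the SDIO variant counting the header itself in the length. The following is a minimal user-space sketch of that bit packing, written with explicit masks instead of the kernel's GENMASK()/FIELD_PREP()/put_unaligned_le32(); function and macro names here are illustrative only.

/* Minimal user-space model of the mt792x USB/SDIO TX header packing.
 * The kernel helper uses GENMASK()/FIELD_PREP() and put_unaligned_le32();
 * the masks and shifts are written out by hand here for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define SDIO_HDR_TX_BYTES_MASK	0x0000ffffu	/* bits 15:0  - payload length */
#define SDIO_HDR_PKT_TYPE_MASK	0x00030000u	/* bits 17:16 - packet type    */

/* Pack the header word and store it little-endian in front of the frame. */
static void add_usb_sdio_hdr(uint8_t *buf, uint32_t frame_len, unsigned int type,
			     int is_usb)
{
	/* For SDIO the length field covers the 4-byte header as well. */
	uint32_t len = is_usb ? frame_len : frame_len + 4;
	uint32_t hdr = (len & SDIO_HDR_TX_BYTES_MASK) |
		       ((type << 16) & SDIO_HDR_PKT_TYPE_MASK);

	buf[0] = hdr & 0xff;
	buf[1] = (hdr >> 8) & 0xff;
	buf[2] = (hdr >> 16) & 0xff;
	buf[3] = (hdr >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[4];

	add_usb_sdio_hdr(buf, 1500, 1, 0);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
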
MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4)); + } else { + /* rx ring */ + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4)); + /* tx ring */ + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4)); + } } int mt792x_dma_enable(struct mt792x_dev *dev) { + if (is_mt7925(&dev->mt76)) + mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28)); + /* configure perfetch settings */ mt792x_dma_prefetch(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c index 20e7f9c7c8..2dd283caed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c @@ -287,6 +287,15 @@ int mt792xu_init_reset(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt792xu_init_reset); +void mt792xu_stop(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt76u_stop_tx(&dev->mt76); + mt792x_stop(hw); +} +EXPORT_SYMBOL_GPL(mt792xu_stop); + void mt792xu_disconnect(struct usb_interface *usb_intf) { struct mt792x_dev *dev = usb_get_intfdata(usb_intf); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 66d8cc0eea..55cb1770fa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -54,23 +54,31 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev, dev = container_of(mphy->dev, struct mt7996_dev, mt76); /* select TX blink mode, 2: only data frames */ - mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2); + mt76_rmw_field(dev, MT_TMAC_TCR0(mphy->band_idx), MT_TMAC_TCR0_TX_BLINK, 2); /* enable LED */ - mt76_wr(dev, MT_LED_EN(0), 1); + mt76_wr(dev, MT_LED_EN(mphy->band_idx), 1); /* set LED Tx blink on/off time */ val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) | FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off); - mt76_wr(dev, MT_LED_TX_BLINK(0), val); + mt76_wr(dev, MT_LED_TX_BLINK(mphy->band_idx), val); + + /* turn LED off */ + if (delay_off == 0xff && delay_on == 0x0) { + val = MT_LED_CTRL_POLARITY | MT_LED_CTRL_KICK; + } else { + /* control LED */ + val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK; + if (mphy->band_idx == MT_BAND1) + val |= MT_LED_CTRL_BLINK_BAND_SEL; + } - /* control LED */ - val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK; if (mphy->leds.al) val |= MT_LED_CTRL_POLARITY; - mt76_wr(dev, MT_LED_CTRL(0), val); - mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK); + mt76_wr(dev, MT_LED_CTRL(mphy->band_idx), 
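
Note on the mt7925 WFDMA prefetch table above: PREFETCH(base, depth) packs a FIFO base offset into the upper 16 bits and a depth into the lower 16, and each ring's base advances by the previous ring's depth times 0x10 so the prefetch windows never overlap. The sketch below reproduces that arithmetic for the mt7925 ring order shown in the hunk; it is only an illustration of the layout, not driver code.

/* Sketch of the WFDMA prefetch layout used above: PREFETCH(base, depth) is
 * base << 16 | depth, and the next ring starts 0x10 * depth further on, so
 * the per-ring FIFO regions are contiguous and non-overlapping.
 */
#include <stdint.h>
#include <stdio.h>

#define PREFETCH(base, depth) (((uint32_t)(base) << 16) | (depth))

int main(void)
{
	static const uint16_t depth[] = {
		0x4, 0x4, 0x4, 0x4,	/* rx ring 0..3 */
		0x10, 0x10, 0x10, 0x10,	/* tx ring 0..3 */
		0x4, 0x4,		/* tx ring 15, 16 */
	};
	uint32_t base = 0;

	for (unsigned int i = 0; i < sizeof(depth) / sizeof(depth[0]); i++) {
		printf("ring %u: EXT_CTRL = 0x%08x\n", i, PREFETCH(base, depth[i]));
		base += depth[i] * 0x10;	/* next FIFO starts after this one */
	}
	return 0;
}
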
val); + mt76_clear(dev, MT_LED_CTRL(mphy->band_idx), MT_LED_CTRL_KICK); } static int mt7996_led_set_blink(struct led_classdev *led_cdev, @@ -173,6 +181,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); wiphy->reg_notifier = mt7996_regd_notifier; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy->mbssid_max_interfaces = 16; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); @@ -196,6 +205,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); hw->max_tx_fragments = 4; @@ -223,6 +233,12 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + phy->mt76->leds.cdev.brightness_set = mt7996_led_set_brightness; + phy->mt76->leds.cdev.blink_set = mt7996_led_set_blink; + } + mt76_set_stream_caps(phy->mt76, true); mt7996_set_stream_vht_txbf_caps(phy); mt7996_set_stream_he_eht_caps(phy); @@ -258,6 +274,11 @@ mt7996_mac_init_band(struct mt7996_dev *dev, u8 band) set = FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_MODE, 0) | FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_PARAM, 0x3); mt76_rmw(dev, MT_WTBLOFF_RSCR(band), mask, set); + + /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than + * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set. + */ + mt76_set(dev, MT_AGG_ACR4(band), MT_AGG_ACR_PPDU_TXS2H); } static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev) @@ -828,8 +849,7 @@ __mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy, n++; } - sband->iftype_data = data; - sband->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(sband, data, n); } void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy) @@ -871,12 +891,6 @@ int mt7996_register_device(struct mt7996_dev *dev) mt7996_init_wiphy(hw); - /* init led callbacks */ - if (IS_ENABLED(CONFIG_MT76_LEDS)) { - dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness; - dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink; - } - ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 26d5675202..fa3001e59a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -950,15 +950,6 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - if (sta) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; - - if (time_after(jiffies, msta->jiffies + HZ / 4)) { - info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->jiffies = jiffies; - } - } - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); t->skb = tx_info->skb; @@ -1006,22 +997,35 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } static void -mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb) { struct mt7996_sta *msta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; u16 fc, tid; - u32 val; if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) return; - tid = le32_get_bits(txwi[1], MT_TXD1_TID); + tid = 
skb->priority & IEEE80211_QOS_CTL_TID_MASK; if (tid >= 6) /* skip VO queue */ return; - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + if (is_8023) { + fc = IEEE80211_FTYPE_DATA | + (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA); + } else { + /* No need to get precise TID for Action/Management Frame, + * since it will not meet the following Frame Control + * condition anyway. + */ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + fc = le16_to_cpu(hdr->frame_control) & + (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); + } + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) return; @@ -1049,7 +1053,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t, wcid_idx = wcid->idx; if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7996_tx_check_aggr(sta, txwi); + mt7996_tx_check_aggr(sta, t->skb); } else { wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX); } @@ -1070,6 +1074,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) struct mt76_phy *phy3 = mdev->phys[MT_BAND2]; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; LIST_HEAD(free_list); struct sk_buff *skb, *tmp; void *end = data + len; @@ -1088,7 +1093,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false); } - if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4)) + if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5)) return; total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); @@ -1104,7 +1109,6 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) info = le32_to_cpu(*cur_info); if (info & MT_TXFREE_INFO_PAIR) { struct mt7996_sta *msta; - struct mt76_wcid *wcid; u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); @@ -1120,10 +1124,21 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) &mdev->sta_poll_list); spin_unlock_bh(&mdev->sta_poll_lock); continue; - } + } else if (info & MT_TXFREE_INFO_HEADER) { + u32 tx_retries = 0, tx_failed = 0; - if (info & MT_TXFREE_INFO_HEADER) + if (!wcid) + continue; + + tx_retries = + FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1; + tx_failed = tx_retries + + !!FIELD_GET(MT_TXFREE_INFO_STAT, info); + + wcid->stats.tx_retries += tx_retries; + wcid->stats.tx_failed += tx_failed; continue; + } for (i = 0; i < 2; i++) { msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID; @@ -1167,22 +1182,31 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, bool cck = false; u32 txrate, txs, mode, stbc; + txs = le32_to_cpu(txs_data[0]); + mt76_tx_status_lock(mdev, &list); skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); - if (!skb) - goto out_no_skb; - txs = le32_to_cpu(txs_data[0]); + if (skb) { + info = IEEE80211_SKB_CB(skb); + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) + info->flags |= IEEE80211_TX_STAT_ACK; - info = IEEE80211_SKB_CB(skb); - if (!(txs & MT_TXS0_ACK_ERROR_MASK)) - info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = + !!(info->flags & IEEE80211_TX_STAT_ACK); + + info->status.rates[0].idx = -1; + } - info->status.ampdu_len = 1; - info->status.ampdu_ack_len = !!(info->flags & - IEEE80211_TX_STAT_ACK); + if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) { + struct ieee80211_sta *sta; + u8 tid; - info->status.rates[0].idx = -1; + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + 
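
Note on the reworked mt7996_tx_check_aggr() above: the TID now comes from skb->priority, and the frame type/subtype is reconstructed either from the hardware-802.3-encap state or from the 802.11 frame-control field, with only QoS data below the VO TIDs eligible for BA session setup. A compact user-space model of that decision follows; constants mirror include/linux/ieee80211.h, everything else is illustrative.

/* User-space model of the A-MPDU eligibility check: derive the frame
 * type/subtype either from "hardware 802.3 encap" state or from the 802.11
 * frame-control field, and only start aggregation for QoS data with TID 0..5
 * (the VO TIDs are skipped).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE		0x000c
#define FCTL_STYPE		0x00f0
#define FTYPE_DATA		0x0008
#define STYPE_QOS_DATA		0x0080
#define QOS_CTL_TID_MASK	0x0007

static bool should_check_aggr(bool ht_or_he, bool is_8023, bool wme,
			      uint16_t fc_le, uint8_t priority)
{
	uint8_t tid = priority & QOS_CTL_TID_MASK;
	uint16_t fc;

	if (!ht_or_he || tid >= 6)	/* no HT/HE peer, or VO queue */
		return false;

	if (is_8023)
		fc = FTYPE_DATA | (wme ? STYPE_QOS_DATA : 0);
	else
		fc = fc_le & (FCTL_FTYPE | FCTL_STYPE);

	return fc == (FTYPE_DATA | STYPE_QOS_DATA);
}

int main(void)
{
	/* QoS data frame, TID 3 -> eligible; TID 6 (VO) -> skipped */
	printf("%d %d\n",
	       should_check_aggr(true, false, true, FTYPE_DATA | STYPE_QOS_DATA, 3),
	       should_check_aggr(true, false, true, FTYPE_DATA | STYPE_QOS_DATA, 6));
	return 0;
}
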
tid = FIELD_GET(MT_TXS0_TID, txs); + ieee80211_refresh_tx_agg_session_timer(sta, tid); + } txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); @@ -1282,9 +1306,8 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, wcid->rate = rate; out: - mt76_tx_status_skb_done(mdev, skb, &list); - -out_no_skb: + if (skb) + mt76_tx_status_skb_done(mdev, skb, &list); mt76_tx_status_unlock(mdev, &list); return !!skb; @@ -1298,13 +1321,10 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data) u16 wcidx; u8 pid; - if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1) - return; - wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); pid = le32_get_bits(txs_data[3], MT_TXS3_PID); - if (pid < MT_PACKET_ID_FIRST) + if (pid < MT_PACKET_ID_NO_SKB) return; if (wcidx >= mt7996_wtbl_size(dev)) @@ -2191,6 +2211,11 @@ void mt7996_mac_work(struct work_struct *work) mphy->mac_work_count = 0; mt7996_mac_update_stats(phy); + + if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { + mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT); + mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT); + } } mutex_unlock(&mphy->dev->mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index 620880e560..09c7a28a3d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -207,7 +207,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->sta.wcid.phy_idx = band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -248,8 +248,8 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, struct mt7996_phy *phy = mt7996_hw_phy(hw); int idx = msta->wcid.idx; - mt7996_mcu_add_bss_info(phy, vif, false); mt7996_mcu_add_sta(dev, vif, NULL, false); + mt7996_mcu_add_bss_info(phy, vif, false); if (vif == phy->monitor_vif) phy->monitor_vif = NULL; @@ -268,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, list_del_init(&msta->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &msta->wcid); + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } int mt7996_set_channel(struct mt7996_phy *phy) @@ -570,17 +570,13 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, /* station mode uses BSSID to map the wlan entry to a peer, * and then peer references bss_info_rfch to set bandwidth cap. 
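
Note on the MT_TXFREE_INFO_HEADER handling in the TX-free path above: the firmware reports how many times an MPDU was transmitted plus a final status bit, and the driver folds that into per-station retry and failure counters (count minus one retries, plus one failure if the frame was never acked). The sketch below models only that bookkeeping; the COUNT/STAT field positions are placeholders, not the real MT_TXFREE_INFO_* layout.

/* Sketch of the retry/failure bookkeeping added to the TX-free handler.
 * Field positions below are placeholders for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct wcid_stats {
	uint64_t tx_retries;
	uint64_t tx_failed;
};

static void account_txfree_header(struct wcid_stats *stats, uint32_t info)
{
	uint32_t count = info & 0x1f;		/* placeholder for INFO_COUNT */
	uint32_t failed = (info >> 5) & 0x1;	/* placeholder for INFO_STAT  */
	uint32_t tx_retries = count - 1;	/* first attempt is not a retry */
	uint32_t tx_failed = tx_retries + failed;

	stats->tx_retries += tx_retries;
	stats->tx_failed += tx_failed;
}

int main(void)
{
	struct wcid_stats stats = { 0 };

	account_txfree_header(&stats, 3);		/* 3 attempts, acked  */
	account_txfree_header(&stats, 4 | (1 << 5));	/* 4 attempts, failed */
	printf("retries=%llu failed=%llu\n",
	       (unsigned long long)stats.tx_retries,
	       (unsigned long long)stats.tx_failed);
	return 0;
}
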
*/ - if (changed & BSS_CHANGED_BSSID && - vif->type == NL80211_IFTYPE_STATION) { - bool join = !is_zero_ether_addr(info->bssid); - - mt7996_mcu_add_bss_info(phy, vif, join); - mt7996_mcu_add_sta(dev, vif, NULL, join); + if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) || + (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) || + (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) { + mt7996_mcu_add_bss_info(phy, vif, true); + mt7996_mcu_add_sta(dev, vif, NULL, true); } - if (changed & BSS_CHANGED_ASSOC) - mt7996_mcu_add_bss_info(phy, vif, vif->cfg.assoc); - if (changed & BSS_CHANGED_ERP_CTS_PROT) mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot); @@ -601,11 +597,6 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, mvif->basic_rates_idx = mt7996_get_rates_table(hw, vif, false, false); - if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { - mt7996_mcu_add_bss_info(phy, vif, true); - mt7996_mcu_add_sta(dev, vif, NULL, true); - } - /* ensure that enable txcmd_mode after bss_info */ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) mt7996_mcu_set_tx(dev, vif); @@ -666,7 +657,6 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->wcid.idx = idx; msta->wcid.phy_idx = band_idx; msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->jiffies = jiffies; ewma_avg_signal_init(&msta->avg_ack_signal); @@ -978,6 +968,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { + struct mt7996_phy *phy = mt7996_hw_phy(hw); struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct rate_info *txrate = &msta->wcid.rate; @@ -998,11 +989,31 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw, sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + + sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + sinfo->ack_signal = (s8)msta->ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); + + if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { + sinfo->tx_bytes = msta->wcid.stats.tx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); + + sinfo->rx_bytes = msta->wcid.stats.rx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); + + sinfo->tx_packets = msta->wcid.stats.tx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + + sinfo->rx_packets = msta->wcid.stats.rx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } } static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 7575d3506e..bf917beb94 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -324,8 +324,10 @@ int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) static void mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (vif->bss_conf.csa_active) - ieee80211_csa_finish(vif); + if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION) + return; + + ieee80211_csa_finish(vif); } static void @@ -399,7 +401,7 @@ out: static void 
mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (!vif->bss_conf.color_change_active) + if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; ieee80211_color_change_finish(vif); @@ -447,6 +449,54 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb) } } +static void +mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb) +{ + struct mt7996_mcu_all_sta_info_event *res; + u16 i; + + skb_pull(skb, sizeof(struct mt7996_mcu_rxd)); + + res = (struct mt7996_mcu_all_sta_info_event *)skb->data; + + for (i = 0; i < le16_to_cpu(res->sta_num); i++) { + u8 ac; + u16 wlan_idx; + struct mt76_wcid *wcid; + + switch (le16_to_cpu(res->tag)) { + case UNI_ALL_STA_TXRX_ADM_STAT: + wlan_idx = le16_to_cpu(res->adm_stat[i].wlan_idx); + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + + if (!wcid) + break; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + wcid->stats.tx_bytes += + le32_to_cpu(res->adm_stat[i].tx_bytes[ac]); + wcid->stats.rx_bytes += + le32_to_cpu(res->adm_stat[i].rx_bytes[ac]); + } + break; + case UNI_ALL_STA_TXRX_MSDU_COUNT: + wlan_idx = le16_to_cpu(res->msdu_cnt[i].wlan_idx); + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + + if (!wcid) + break; + + wcid->stats.tx_packets += + le32_to_cpu(res->msdu_cnt[i].tx_msdu_cnt); + wcid->stats.rx_packets += + le32_to_cpu(res->msdu_cnt[i].rx_msdu_cnt); + break; + default: + break; + } + } +} + static void mt7996_mcu_rx_ext_event(struct mt7996_dev *dev, struct sk_buff *skb) { @@ -491,6 +541,9 @@ mt7996_mcu_uni_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb) case MCU_UNI_EVENT_RDD_REPORT: mt7996_mcu_rx_radar_detected(dev, skb); break; + case MCU_UNI_EVENT_ALL_STA_INFO: + mt7996_mcu_rx_all_sta_info_event(dev, skb); + break; default: break; } @@ -600,6 +653,24 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; } +static void +mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct mt7996_phy *phy, int enable) +{ + struct bss_info_uni_mbssid *mbssid; + struct tlv *tlv; + + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid)); + + mbssid = (struct bss_info_uni_mbssid *)tlv; + + if (enable && vif->bss_conf.bssid_indicator) { + mbssid->max_indicator = vif->bss_conf.bssid_indicator; + mbssid->mbss_idx = vif->bss_conf.bssid_index; + mbssid->tx_bss_omac_idx = 0; + } +} + static void mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct mt7996_phy *phy) @@ -866,6 +937,9 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, /* this tag is necessary no matter if the vif is MLD */ mt7996_mcu_bss_mld_tlv(skb, vif); } + + mt7996_mcu_bss_mbssid_tlv(skb, vif, phy, enable); + out: return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); @@ -1152,6 +1226,8 @@ mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb, HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); muru->ofdma_ul.uo_ra = HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); + muru->ofdma_ul.rx_ctrl_frame_to_mbss = + HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]); } static inline bool @@ -1624,6 +1700,132 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, MCU_WM_UNI_CMD(RA), true); } +static int +mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *data, u32 field) +{ + struct mt7996_vif *mvif = 
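
Note on the new MCU_UNI_EVENT_ALL_STA_INFO handler above: the firmware event carries a batch of per-station records (either per-access-category admission byte counters or MSDU counters), and the driver looks each wlan_idx up and accumulates the values into wcid->stats. The following user-space sketch models the per-AC accumulation with a simplified record layout; the real event fields are little-endian and RCU-protected lookups are used in the driver.

/* User-space model of the ALL_STA_INFO admission-stat accumulation. */
#include <stdint.h>
#include <stdio.h>

#define NUM_ACS 4

struct adm_stat_record {
	uint16_t wlan_idx;
	uint32_t tx_bytes[NUM_ACS];
	uint32_t rx_bytes[NUM_ACS];
};

struct sta_stats {
	uint64_t tx_bytes;
	uint64_t rx_bytes;
};

static void accumulate_adm_stat(struct sta_stats *stations, unsigned int n_sta,
				const struct adm_stat_record *rec,
				unsigned int n_rec)
{
	for (unsigned int i = 0; i < n_rec; i++) {
		if (rec[i].wlan_idx >= n_sta)	/* unknown station, skip */
			continue;
		for (unsigned int ac = 0; ac < NUM_ACS; ac++) {
			stations[rec[i].wlan_idx].tx_bytes += rec[i].tx_bytes[ac];
			stations[rec[i].wlan_idx].rx_bytes += rec[i].rx_bytes[ac];
		}
	}
}

int main(void)
{
	struct sta_stats stations[4] = { 0 };
	struct adm_stat_record rec = {
		.wlan_idx = 1,
		.tx_bytes = { 100, 200, 300, 400 },
		.rx_bytes = { 10, 20, 30, 40 },
	};

	accumulate_adm_stat(stations, 4, &rec, 1);
	printf("sta1 tx=%llu rx=%llu\n",
	       (unsigned long long)stations[1].tx_bytes,
	       (unsigned long long)stations[1].rx_bytes);
	return 0;
}
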
(struct mt7996_vif *)vif->drv_priv; + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct sta_phy *phy = data; + struct sta_rec_ra_fixed *ra; + struct sk_buff *skb; + struct tlv *tlv; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid, + MT7996_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); + ra = (struct sta_rec_ra_fixed *)tlv; + + switch (field) { + case RATE_PARAM_AUTO: + break; + case RATE_PARAM_FIXED: + case RATE_PARAM_FIXED_MCS: + case RATE_PARAM_FIXED_GI: + case RATE_PARAM_FIXED_HE_LTF: + if (phy) + ra->phy = *phy; + break; + default: + break; + } + ra->field = cpu_to_le32(field); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +} + +static int +mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; + struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; + enum nl80211_band band = chandef->chan->band; + struct sta_phy phy = {}; + int ret, nrates = 0; + +#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ + do { \ + u8 i, gi = mask->control[band]._gi; \ + gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \ + phy.sgi = gi; \ + phy.he_ltf = mask->control[band].he_ltf; \ + for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \ + if (!mask->control[band]._mcs[i]) \ + continue; \ + nrates += hweight16(mask->control[band]._mcs[i]); \ + phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \ + if (_ht) \ + phy.mcs += 8 * i; \ + } \ + } while (0) + + if (sta->deflink.he_cap.has_he) { + __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); + } else if (sta->deflink.vht_cap.vht_supported) { + __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); + } else if (sta->deflink.ht_cap.ht_supported) { + __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); + } else { + nrates = hweight32(mask->control[band].legacy); + phy.mcs = ffs(mask->control[band].legacy) - 1; + } +#undef __sta_phy_bitrate_mask_check + + /* fall back to auto rate control */ + if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && + mask->control[band].he_gi == GENMASK(7, 0) && + mask->control[band].he_ltf == GENMASK(7, 0) && + nrates != 1) + return 0; + + /* fixed single rate */ + if (nrates == 1) { + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_MCS); + if (ret) + return ret; + } + + /* fixed GI */ + if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || + mask->control[band].he_gi != GENMASK(7, 0)) { + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + u32 addr; + + /* firmware updates only TXCMD but doesn't take WTBL into + * account, so driver should update here to reflect the + * actual txrate hardware sends out. 
+ */ + addr = mt7996_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); + if (sta->deflink.he_cap.has_he) + mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); + else + mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); + + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_GI); + if (ret) + return ret; + } + + /* fixed HE_LTF */ + if (mask->control[band].he_ltf != GENMASK(7, 0)) { + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_HE_LTF); + if (ret) + return ret; + } + + return 0; +} + static void mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -1733,6 +1935,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct sk_buff *skb; + int ret; skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid, @@ -1752,8 +1955,12 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, */ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); + if (ret) + return ret; + + return mt7996_mcu_add_rate_ctrl_fixed(dev, vif, sta); } static int @@ -1995,6 +2202,59 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb, info->cnt = skb->data[offs->cntdwn_counter_offs[0]]; } +static void +mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, + struct ieee80211_vif *vif, struct bss_bcn_content_tlv *bcn, + struct ieee80211_mutable_offsets *offs) +{ + struct bss_bcn_mbss_tlv *mbss; + const struct element *elem; + struct tlv *tlv; + + if (!vif->bss_conf.bssid_indicator) + return; + + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_MBSSID, sizeof(*mbss)); + + mbss = (struct bss_bcn_mbss_tlv *)tlv; + mbss->offset[0] = cpu_to_le16(offs->tim_offset); + mbss->bitmap = cpu_to_le32(1); + + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, + &skb->data[offs->mbssid_off], + skb->len - offs->mbssid_off) { + const struct element *sub_elem; + + if (elem->datalen < 2) + continue; + + for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) { + const struct ieee80211_bssid_index *idx; + const u8 *idx_ie; + + /* not a valid BSS profile */ + if (sub_elem->id || sub_elem->datalen < 4) + continue; + + /* Find WLAN_EID_MULTI_BSSID_IDX + * in the merged nontransmitted profile + */ + idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, + sub_elem->data, sub_elem->datalen); + if (!idx_ie || idx_ie[1] < sizeof(*idx)) + continue; + + idx = (void *)(idx_ie + 2); + if (!idx->bssid_index || idx->bssid_index > 31) + continue; + + mbss->offset[idx->bssid_index] = cpu_to_le16(idx_ie - + skb->data); + mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index)); + } + } +} + static void mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct sk_buff *rskb, struct sk_buff *skb, @@ -2036,6 +2296,9 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct bss_bcn_content_tlv *bcn; int len; + if (vif->bss_conf.nontransmitted) + return 0; + rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) @@ -2065,7 +2328,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, goto out; mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); - /* TODO: 
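
Note on mt7996_mcu_add_rate_ctrl_fixed() above: the cfg80211 bitrate mask is scanned stream by stream, the total number of enabled rates is counted with hweight, and when exactly one rate remains its MCS index is taken from the lowest set bit (offset by 8 per HT stream). A standalone sketch of that selection using the compiler's popcount/ffs builtins follows; it covers only HT-style masks, whereas the driver also handles VHT/HE plus the GI and HE-LTF fields.

/* Sketch of the "fixed rate from bitrate mask" selection. */
#include <stdint.h>
#include <stdio.h>

struct fixed_rate {
	int nrates;	/* how many rates the mask leaves enabled */
	int mcs;	/* chosen MCS when nrates == 1 */
};

static struct fixed_rate pick_fixed_rate(const uint16_t *mcs_mask,
					 unsigned int n_streams, int is_ht)
{
	struct fixed_rate r = { .nrates = 0, .mcs = -1 };

	for (unsigned int i = 0; i < n_streams; i++) {
		if (!mcs_mask[i])
			continue;
		r.nrates += __builtin_popcount(mcs_mask[i]);
		r.mcs = __builtin_ffs(mcs_mask[i]) - 1;
		if (is_ht)
			r.mcs += 8 * i;	/* HT MCS index includes the stream */
	}
	return r;
}

int main(void)
{
	uint16_t single[4] = { 0, 1 << 5, 0, 0 };	/* only stream-1 bit 5 -> MCS13 */
	uint16_t many[4] = { 0x00ff, 0x00ff, 0, 0 };	/* full 2-stream mask */
	struct fixed_rate a = pick_fixed_rate(single, 4, 1);
	struct fixed_rate b = pick_fixed_rate(many, 4, 1);

	printf("single: nrates=%d mcs=%d, many: nrates=%d\n",
	       a.nrates, a.mcs, b.nrates);
	return 0;
}
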
subtag - 11v MBSSID */ + mt7996_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs); mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs); out: dev_kfree_skb(skb); @@ -3801,3 +4064,20 @@ int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val) return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RRO), &req, sizeof(req), true); } + +int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag) +{ + struct mt7996_dev *dev = phy->dev; + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + } __packed req = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(sizeof(req) - 4), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(ALL_STA_INFO), + &req, sizeof(req), false); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index dc8d0a30c7..9300cd8eeb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -153,6 +153,32 @@ struct mt7996_mcu_mib { __le64 data; } __packed; +struct mt7996_mcu_all_sta_info_event { + u8 rsv[4]; + __le16 tag; + __le16 len; + u8 more; + u8 rsv2; + __le16 sta_num; + u8 rsv3[4]; + + union { + struct { + __le16 wlan_idx; + u8 rsv[2]; + __le32 tx_bytes[IEEE80211_NUM_ACS]; + __le32 rx_bytes[IEEE80211_NUM_ACS]; + } adm_stat[0] __packed; + + struct { + __le16 wlan_idx; + u8 rsv[2]; + __le32 tx_msdu_cnt; + __le32 rx_msdu_cnt; + } msdu_cnt[0] __packed; + } __packed; +} __packed; + enum mt7996_chan_mib_offs { UNI_MIB_OBSS_AIRTIME = 26, UNI_MIB_NON_WIFI_TIME = 27, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 7354e5cf8e..e53cf6a370 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -110,7 +110,6 @@ struct mt7996_sta { struct ewma_avg_signal avg_ack_signal; unsigned long changed; - unsigned long jiffies; struct mt76_connac_sta_key_conf bip; @@ -402,6 +401,7 @@ int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level); int mt7996_mcu_trigger_assert(struct mt7996_dev *dev); void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb); void mt7996_mcu_exit(struct mt7996_dev *dev); +int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag); static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c index c5301050ff..67c0158962 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c @@ -17,11 +17,13 @@ static u32 hif_idx; static const struct pci_device_id mt7996_pci_device_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7992) }, { }, }; static const struct pci_device_id mt7996_hif_device_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x799a) }, { }, }; @@ -60,7 +62,9 @@ static void mt7996_put_hif2(struct mt7996_hif *hif) static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev) { hif_idx++; - if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL)) + + if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL) && + !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x799a, NULL)) return NULL; writel(hif_idx | MT_PCIE_RECOG_ID_SEM, @@ -113,7 +117,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev, mt76_pci_disable_aspm(pdev); - if (id->device == 0x7991) + if (id->device == 0x7991 || id->device == 
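
Note on the 11v MBSSID beacon support wired in above: mt7996_mcu_beacon_mbss() walks the Multiple BSSID elements of the beacon template, locates the Multiple BSSID Index subelement of each nontransmitted profile, and records the profile's offset in the frame plus a bit in a 32-bit bitmap keyed by bssid_index (1..31, with bit 0 reserved for the transmitting BSS). The reduced sketch below models only that bitmap bookkeeping, not the IE parsing itself.

/* Reduced model of the MBSSID beacon bookkeeping: remember where each
 * nontransmitted profile starts in the beacon and mark its BSSID index in a
 * bitmap; index 0 (the transmitting BSS) stays set unconditionally.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BSSID_INDICATOR 31

struct mbss_info {
	uint16_t offset[MAX_BSSID_INDICATOR + 1];	/* per-profile beacon offsets */
	uint32_t bitmap;				/* which BSSID indices exist  */
};

static void mbss_add_profile(struct mbss_info *mbss, uint8_t bssid_index,
			     uint16_t frame_offset)
{
	if (!bssid_index || bssid_index > MAX_BSSID_INDICATOR)
		return;	/* index 0 is the transmitting BSS; >31 does not fit */

	mbss->offset[bssid_index] = frame_offset;
	mbss->bitmap |= 1u << bssid_index;
}

int main(void)
{
	struct mbss_info mbss = { .bitmap = 1 };	/* transmitting BSS always set */

	mbss_add_profile(&mbss, 1, 120);
	mbss_add_profile(&mbss, 2, 168);
	mbss_add_profile(&mbss, 0, 64);			/* ignored */
	printf("bitmap=0x%x off1=%u off2=%u\n",
	       mbss.bitmap, mbss.offset[1], mbss.offset[2]);
	return 0;
}
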
0x799a) return mt7996_pci_hif2_probe(pdev); dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h index 97beab9245..0086a78666 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h @@ -243,6 +243,13 @@ enum base_rev { FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \ FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) +/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */ +#define MT_WF_AGG_BASE(_band) __BASE(WF_AGG_BASE, (_band)) +#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) + +#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, 0x3c) +#define MT_AGG_ACR_PPDU_TXS2H BIT(1) + /* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */ #define MT_WF_ARB_BASE(_band) __BASE(WF_ARB_BASE, (_band)) #define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) @@ -509,6 +516,7 @@ enum base_rev { #define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4)) #define MT_LED_CTRL_KICK BIT(7) +#define MT_LED_CTRL_BLINK_BAND_SEL BIT(4) #define MT_LED_CTRL_BLINK_MODE BIT(2) #define MT_LED_CTRL_POLARITY BIT(1) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 6cc26cc6c5..1809b03292 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -329,40 +329,32 @@ void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { - struct mt76_dev *dev = phy->dev; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct mt76_queue *q; - int qid = skb_get_queue_mapping(skb); if (mt76_testmode_enabled(phy)) { ieee80211_free_txskb(phy->hw, skb); return; } - if (WARN_ON(qid >= MT_TXQ_PSD)) { - qid = MT_TXQ_BE; - skb_set_queue_mapping(skb, qid); - } - - if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && - !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && - !ieee80211_is_data(hdr->frame_control) && - !ieee80211_is_bufferable_mmpdu(skb)) { - qid = MT_TXQ_PSD; - } + if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD)) + skb_set_queue_mapping(skb, MT_TXQ_BE); if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx); - q = phy->q_tx[qid]; - spin_lock_bh(&q->lock); - __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); - dev->queue_ops->kick(dev, q); - spin_unlock_bh(&q->lock); + spin_lock_bh(&wcid->tx_pending.lock); + __skb_queue_tail(&wcid->tx_pending, skb); + spin_unlock_bh(&wcid->tx_pending.lock); + + spin_lock_bh(&phy->tx_lock); + if (list_empty(&wcid->tx_list)) + list_add_tail(&wcid->tx_list, &phy->tx_list); + spin_unlock_bh(&phy->tx_lock); + + mt76_worker_schedule(&phy->dev->tx_worker); } EXPORT_SYMBOL_GPL(mt76_tx); @@ -593,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) } EXPORT_SYMBOL_GPL(mt76_txq_schedule); +static int +mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) +{ + struct mt76_dev *dev = phy->dev; + struct ieee80211_sta *sta; + struct mt76_queue *q; + struct sk_buff *skb; + int ret = 0; + + spin_lock(&wcid->tx_pending.lock); + while ((skb = skb_peek(&wcid->tx_pending)) != NULL) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int qid = 
skb_get_queue_mapping(skb); + + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(skb)) + qid = MT_TXQ_PSD; + + q = phy->q_tx[qid]; + if (mt76_txq_stopped(q)) { + ret = -1; + break; + } + + __skb_unlink(skb, &wcid->tx_pending); + spin_unlock(&wcid->tx_pending.lock); + + sta = wcid_to_sta(wcid); + spin_lock(&q->lock); + __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); + dev->queue_ops->kick(dev, q); + spin_unlock(&q->lock); + + spin_lock(&wcid->tx_pending.lock); + } + spin_unlock(&wcid->tx_pending.lock); + + return ret; +} + +static void mt76_txq_schedule_pending(struct mt76_phy *phy) +{ + if (list_empty(&phy->tx_list)) + return; + + local_bh_disable(); + rcu_read_lock(); + + spin_lock(&phy->tx_lock); + while (!list_empty(&phy->tx_list)) { + struct mt76_wcid *wcid = NULL; + int ret; + + wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list); + list_del_init(&wcid->tx_list); + + spin_unlock(&phy->tx_lock); + ret = mt76_txq_schedule_pending_wcid(phy, wcid); + spin_lock(&phy->tx_lock); + + if (ret) { + if (list_empty(&wcid->tx_list)) + list_add_tail(&wcid->tx_list, &phy->tx_list); + break; + } + } + spin_unlock(&phy->tx_lock); + + rcu_read_unlock(); + local_bh_enable(); +} + void mt76_txq_schedule_all(struct mt76_phy *phy) { int i; + mt76_txq_schedule_pending(phy); for (i = 0; i <= MT_TXQ_BK; i++) mt76_txq_schedule(phy, i); } diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c index 51d977ffc5..5aeeac0dd9 100644 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -110,7 +110,7 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) info->flags |= IEEE80211_TX_STAT_ACK; spin_lock_bh(&dev->mac_lock); - ieee80211_tx_status(dev->hw, skb); + ieee80211_tx_status_skb(dev->hw, skb); spin_unlock_bh(&dev->mac_lock); } diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index cc772045d5..c41ae251cb 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -365,6 +365,7 @@ static int mt7601u_resume(struct usb_interface *usb_intf) MODULE_DEVICE_TABLE(usb, mt7601u_device_table); MODULE_FIRMWARE(MT7601U_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT7601U USB Wireless LAN driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt7601u_driver = { diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index b545d93c6e..da52f91693 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1441,11 +1441,11 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev, } static int change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *beacon) + struct cfg80211_ap_update *params) { struct wilc_vif *vif = netdev_priv(dev); - return wilc_add_beacon(vif, 0, 0, beacon); + return wilc_add_beacon(vif, 0, 0, ¶ms->beacon); } static int stop_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index e9f59de31b..91d71e0f7e 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -148,8 +148,8 @@ static int wilc_txq_task(void *vp) 
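
Note on the mt76 TX path rework above: mt76_tx() no longer writes straight into a hardware queue; frames are appended to the station's tx_pending list, the station is linked once into the phy's tx_list, and the tx worker later drains each station's pending frames into the appropriate hardware queue. The single-threaded sketch below models that two-level queueing; the driver protects both lists with spinlocks and runs the flush from its dedicated worker, none of which is reproduced here.

/* Single-threaded model of the per-station pending TX queue. */
#include <stdio.h>
#include <stdlib.h>

struct frame {
	int id;
	struct frame *next;
};

struct station {
	struct frame *pending_head, *pending_tail;
	struct station *next_pending;	/* link in the phy's pending list */
	int on_phy_list;
};

struct phy {
	struct station *pending_head, *pending_tail;
};

static void tx_enqueue(struct phy *phy, struct station *sta, struct frame *f)
{
	f->next = NULL;
	if (sta->pending_tail)
		sta->pending_tail->next = f;
	else
		sta->pending_head = f;
	sta->pending_tail = f;

	if (!sta->on_phy_list) {	/* schedule the station exactly once */
		sta->on_phy_list = 1;
		sta->next_pending = NULL;
		if (phy->pending_tail)
			phy->pending_tail->next_pending = sta;
		else
			phy->pending_head = sta;
		phy->pending_tail = sta;
	}
}

static void tx_worker(struct phy *phy)
{
	while (phy->pending_head) {
		struct station *sta = phy->pending_head;

		phy->pending_head = sta->next_pending;
		if (!phy->pending_head)
			phy->pending_tail = NULL;
		sta->on_phy_list = 0;

		while (sta->pending_head) {	/* flush to the hardware queue */
			struct frame *f = sta->pending_head;

			sta->pending_head = f->next;
			printf("tx frame %d\n", f->id);
			free(f);
		}
		sta->pending_tail = NULL;
	}
}

int main(void)
{
	struct phy phy = { 0 };
	struct station sta = { 0 };

	for (int i = 0; i < 3; i++) {
		struct frame *f = calloc(1, sizeof(*f));

		f->id = i;
		tx_enqueue(&phy, &sta, f);
	}
	tx_worker(&phy);
	return 0;
}
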
complete(&wl->txq_thread_started); while (1) { - wait_for_completion(&wl->txq_event); - + if (wait_for_completion_interruptible(&wl->txq_event)) + continue; if (wl->close) { complete(&wl->txq_thread_started); @@ -166,12 +166,24 @@ static int wilc_txq_task(void *vp) srcu_idx = srcu_read_lock(&wl->srcu); list_for_each_entry_rcu(ifc, &wl->vif_list, list) { - if (ifc->mac_opened && ifc->ndev) + if (ifc->mac_opened && + netif_queue_stopped(ifc->ndev)) netif_wake_queue(ifc->ndev); } srcu_read_unlock(&wl->srcu, srcu_idx); } - } while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close); + if (ret != WILC_VMM_ENTRY_FULL_RETRY) + break; + /* Back off TX task from sending packets for some time. + * msleep_interruptible will allow RX task to run and + * free buffers. TX task will be in TASK_INTERRUPTIBLE + * state which will put the thread back to CPU running + * queue when it's signaled even if the timeout isn't + * elapsed. This gives faster chance for reserved SK + * buffers to be free. + */ + msleep_interruptible(TX_BACKOFF_WEIGHT_MS); + } while (!wl->close); } return 0; } diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index bb1a315a7b..aafe3dc44a 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -27,6 +27,8 @@ #define TCP_ACK_FILTER_LINK_SPEED_THRESH 54 #define DEFAULT_LINK_SPEED 72 +#define TX_BACKOFF_WEIGHT_MS 1 + struct wilc_wfi_stats { unsigned long rx_packets; unsigned long tx_packets; diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 73e6f9408b..663d77770f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -331,11 +331,11 @@ out: } static int qtnf_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *info) + struct cfg80211_ap_update *info) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); - return qtnf_mgmt_set_appie(vif, info); + return qtnf_mgmt_set_appie(vif, &info->beacon); } static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 68ae9c7ea9..9540ad6196 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1335,7 +1335,7 @@ static int qtnf_cmd_band_fill_iftype(const u8 *data, return -EINVAL; } - kfree(band->iftype_data); + kfree((__force void *)band->iftype_data); band->iftype_data = NULL; band->n_iftype_data = tlv->n_iftype_data; if (band->n_iftype_data == 0) @@ -1347,7 +1347,8 @@ static int qtnf_cmd_band_fill_iftype(const u8 *data, band->n_iftype_data = 0; return -ENOMEM; } - band->iftype_data = iftype_data; + + _ieee80211_set_sband_iftype_data(band, iftype_data, tlv->n_iftype_data); for (i = 0; i < band->n_iftype_data; i++) qtnf_cmd_conv_iftype(iftype_data++, &tlv->iftype_data[i]); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 2a63ffdc4b..677bac8353 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -535,7 +535,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) if (!wiphy->bands[band]) continue; - kfree(wiphy->bands[band]->iftype_data); + kfree((__force void *)wiphy->bands[band]->iftype_data); 
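
Note on the wilc1000 TX queue task rework above: when the chip reports WILC_VMM_ENTRY_FULL_RETRY the task now sleeps briefly (msleep_interruptible) so the RX path can return buffers, then retries until either the queue drains or the driver is closing. The sketch below models only that retry-with-backoff loop; the handler function and its return codes are stand-ins, not the wilc API.

/* Model of the retry/backoff loop in the TX queue task. */
#include <stdio.h>
#include <unistd.h>

#define ENTRY_FULL_RETRY	(-2)	/* stand-in for WILC_VMM_ENTRY_FULL_RETRY */
#define TX_BACKOFF_MS		1

static int handle_txq(int *remaining)
{
	if (*remaining > 3) {		/* pretend the VMM table fills up first */
		*remaining -= 3;
		return ENTRY_FULL_RETRY;
	}
	*remaining = 0;
	return 0;
}

static void txq_task(int close_requested)
{
	int remaining = 10;

	do {
		int ret = handle_txq(&remaining);

		if (ret != ENTRY_FULL_RETRY)
			break;
		/* back off so RX processing can free reserved buffers */
		usleep(TX_BACKOFF_MS * 1000);
		printf("VMM full, %d frames left, retrying\n", remaining);
	} while (!close_requested);
}

int main(void)
{
	txq_task(0);
	return 0;
}
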
wiphy->bands[band]->n_iftype_data = 0; kfree(wiphy->bands[band]->channels); diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 31bc58e96a..3b283e93a1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -477,9 +477,9 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac, if (!vif->netdev) continue; - mutex_lock(&vif->wdev.mtx); + wiphy_lock(priv_to_wiphy(vif->mac)); cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0); - mutex_unlock(&vif->wdev.mtx); + wiphy_unlock(priv_to_wiphy(vif->mac)); } return 0; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index de2ee5ffc3..48521e4557 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -870,6 +870,18 @@ #define LED_CFG_Y_LED_MODE FIELD32(0x30000000) #define LED_CFG_LED_POLAR FIELD32(0x40000000) +/* + * AMPDU_MAX_LEN_20M1S: Per MCS max A-MPDU length, 20 MHz, MCS 0-7 + * AMPDU_MAX_LEN_20M2S: Per MCS max A-MPDU length, 20 MHz, MCS 8-15 + * AMPDU_MAX_LEN_40M1S: Per MCS max A-MPDU length, 40 MHz, MCS 0-7 + * AMPDU_MAX_LEN_40M2S: Per MCS max A-MPDU length, 40 MHz, MCS 8-15 + * Maximum A-MPDU length = 2^(AMPDU_MAX - 5) kilobytes + */ +#define AMPDU_MAX_LEN_20M1S 0x1030 +#define AMPDU_MAX_LEN_20M2S 0x1034 +#define AMPDU_MAX_LEN_40M1S 0x1038 +#define AMPDU_MAX_LEN_40M2S 0x103C + /* * AMPDU_BA_WINSIZE: Force BlockAck window size * FORCE_WINSIZE_ENABLE: @@ -1545,6 +1557,12 @@ */ #define EXP_ACK_TIME 0x1380 +/* + * HT_FBK_TO_LEGACY: Enable/Disable HT/RTS fallback to OFDM/CCK rate + * Not available for legacy SoCs + */ +#define HT_FBK_TO_LEGACY 0x1384 + /* TX_PWR_CFG_5 */ #define TX_PWR_CFG_5 0x1384 #define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index e65cc00fa1..1926ffdffb 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -856,6 +856,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2); + s8 base_val = rt2x00_rt(rt2x00dev, RT6352) ? -2 : -12; u16 eeprom; u8 offset0; u8 offset1; @@ -880,9 +881,9 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) * If the value in the descriptor is 0, it is considered invalid * and the default (extremely low) rssi value is assumed */ - rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128; - rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128; - rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128; + rssi0 = (rssi0) ? (base_val - offset0 - rt2x00dev->lna_gain - rssi0) : -128; + rssi1 = (rssi1) ? (base_val - offset1 - rt2x00dev->lna_gain - rssi1) : -128; + rssi2 = (rssi2) ? (base_val - offset2 - rt2x00dev->lna_gain - rssi2) : -128; /* * mac80211 only accepts a single RSSI value. 
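
Note on the rt2800_agc_to_rssi() change above: the conversion base is now chip dependent (-2 for RT6352, -12 for the older chips) before subtracting the EEPROM offset, LNA gain, and the raw AGC reading, and a raw value of 0 still maps to the invalid sentinel -128. A small sketch of that per-chain conversion follows, purely for illustration.

/* Sketch of the per-chain RSSI conversion with the chip-dependent base. */
#include <stdint.h>
#include <stdio.h>

static int8_t agc_to_rssi(uint8_t raw, int8_t eeprom_offset, int8_t lna_gain,
			  int is_rt6352)
{
	int base = is_rt6352 ? -2 : -12;

	if (!raw)
		return -128;	/* descriptor reported no measurement */

	return (int8_t)(base - eeprom_offset - lna_gain - raw);
}

int main(void)
{
	printf("%d %d %d\n",
	       agc_to_rssi(40, 0, 8, 0),	/* legacy chip */
	       agc_to_rssi(40, 0, 8, 1),	/* RT6352      */
	       agc_to_rssi(0, 0, 8, 1));	/* invalid     */
	return 0;
}
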
Calculating the @@ -1236,13 +1237,14 @@ void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus); -static int rt2800_check_hung(struct data_queue *queue) +static bool rt2800_check_hung(struct data_queue *queue) { unsigned int cur_idx = rt2800_drv_get_dma_done(queue); - if (queue->wd_idx != cur_idx) + if (queue->wd_idx != cur_idx) { + queue->wd_idx = cur_idx; queue->wd_count = 0; - else + } else queue->wd_count++; return queue->wd_count > 16; @@ -1279,7 +1281,7 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) case QID_MGMT: if (rt2x00queue_empty(queue)) continue; - hung_tx = rt2800_check_hung(queue); + hung_tx = hung_tx || rt2800_check_hung(queue); break; case QID_RX: /* For station mode we should reactive at least @@ -1288,7 +1290,7 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) */ if (rt2x00dev->intf_sta_count == 0) continue; - hung_rx = rt2800_check_hung(queue); + hung_rx = hung_rx || rt2800_check_hung(queue); break; default: break; @@ -1301,8 +1303,12 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) if (hung_rx) rt2x00_warn(rt2x00dev, "Watchdog RX hung detected\n"); - if (hung_tx || hung_rx) + if (hung_tx || hung_rx) { + queue_for_each(rt2x00dev, queue) + queue->wd_count = 0; + ieee80211_restart_hw(rt2x00dev->hw); + } } EXPORT_SYMBOL_GPL(rt2800_watchdog); @@ -3855,14 +3861,6 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev, rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr); } - - if (conf_is_ht40(conf)) { - rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10); - rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f); - } else { - rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a); - rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40); - } } static void rt2800_config_alc_rt6352(struct rt2x00_dev *rt2x00dev, @@ -4431,66 +4429,45 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, usleep_range(1000, 1500); } - if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) { - reg = 0x10; - if (!conf_is_ht40(conf)) { - if (rt2x00_rt(rt2x00dev, RT6352) && - rt2x00_has_cap_external_lna_bg(rt2x00dev)) { - reg |= 0x5; - } else { - reg |= 0xa; - } - } - rt2800_bbp_write(rt2x00dev, 195, 141); - rt2800_bbp_write(rt2x00dev, 196, reg); + if (rt2x00_rt(rt2x00dev, RT5592)) { + bbp = conf_is_ht40(conf) ? 0x10 : 0x1a; + rt2800_bbp_glrt_write(rt2x00dev, 141, bbp); - /* AGC init. - * Despite the vendor driver using different values here for - * RT6352 chip, we use 0x1c for now. This may have to be changed - * once TSSI got implemented. - */ - reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain; - rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); + bbp = (rf->channel <= 14 ? 
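
Note on the rt2800 watchdog fix above: rt2800_check_hung() now snapshots the DMA completion index and resets the per-queue stall counter whenever progress is observed, a queue is declared hung only after more than 16 consecutive checks without progress, and all counters are cleared before ieee80211_restart_hw() so a single restart is not retriggered immediately. A compact model of the counter logic follows.

/* Model of the per-queue hang detection used by the watchdog. */
#include <stdbool.h>
#include <stdio.h>

struct queue_wd {
	unsigned int wd_idx;	/* DMA index seen at the last check */
	unsigned int wd_count;	/* consecutive checks without progress */
};

static bool check_hung(struct queue_wd *q, unsigned int cur_idx)
{
	if (q->wd_idx != cur_idx) {
		q->wd_idx = cur_idx;	/* progress: resync and reset counter */
		q->wd_count = 0;
	} else {
		q->wd_count++;
	}

	return q->wd_count > 16;
}

int main(void)
{
	struct queue_wd q = { 0 };
	unsigned int i;

	check_hung(&q, 5);		/* index moved once ... */
	for (i = 0; i < 17; i++)	/* ... then stalls */
		if (check_hung(&q, 5))
			printf("queue hung after %u stalled checks\n", q.wd_count);
	return 0;
}
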
0x1c : 0x24) + 2 * rt2x00dev->lna_gain; + rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp); - if (rt2x00_rt(rt2x00dev, RT5592)) - rt2800_iq_calibrate(rt2x00dev, rf->channel); + rt2800_iq_calibrate(rt2x00dev, rf->channel); } if (rt2x00_rt(rt2x00dev, RT6352)) { - if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, - &rt2x00dev->cap_flags)) { - reg = rt2800_register_read(rt2x00dev, RF_CONTROL3); - reg |= 0x00000101; - rt2800_register_write(rt2x00dev, RF_CONTROL3, reg); - - reg = rt2800_register_read(rt2x00dev, RF_BYPASS3); - reg |= 0x00000101; - rt2800_register_write(rt2x00dev, RF_BYPASS3, reg); - - rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05); - rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05); - rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05); - rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00); - - rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, - 0x36303636); - rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, - 0x6C6C6B6C); - rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, - 0x6C6C6B6C); + /* BBP for GLRT BW */ + bbp = conf_is_ht40(conf) ? + 0x10 : rt2x00_has_cap_external_lna_bg(rt2x00dev) ? + 0x15 : 0x1a; + rt2800_bbp_glrt_write(rt2x00dev, 141, bbp); + + bbp = conf_is_ht40(conf) ? 0x2f : 0x40; + rt2800_bbp_glrt_write(rt2x00dev, 157, bbp); + + if (rt2x00dev->default_ant.rx_chain_num == 1) { + rt2800_bbp_write(rt2x00dev, 91, 0x07); + rt2800_bbp_write(rt2x00dev, 95, 0x1a); + rt2800_bbp_glrt_write(rt2x00dev, 128, 0xa0); + rt2800_bbp_glrt_write(rt2x00dev, 170, 0x12); + rt2800_bbp_glrt_write(rt2x00dev, 171, 0x10); + } else { + rt2800_bbp_write(rt2x00dev, 91, 0x06); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_glrt_write(rt2x00dev, 128, 0xe0); + rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 171, 0x30); } + + /* AGC init */ + bbp = rf->channel <= 14 ? 
0x04 + 2 * rt2x00dev->lna_gain : 0; + rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp); + + usleep_range(1000, 1500); } bbp = rt2800_bbp_read(rt2x00dev, 4); @@ -5600,43 +5577,6 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) } } rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); - - if (rt2x00_rt(rt2x00dev, RT6352)) { - if (rt2x00dev->default_ant.rx_chain_num == 1) { - rt2800_bbp_write(rt2x00dev, 91, 0x07); - rt2800_bbp_write(rt2x00dev, 95, 0x1A); - rt2800_bbp_write(rt2x00dev, 195, 128); - rt2800_bbp_write(rt2x00dev, 196, 0xA0); - rt2800_bbp_write(rt2x00dev, 195, 170); - rt2800_bbp_write(rt2x00dev, 196, 0x12); - rt2800_bbp_write(rt2x00dev, 195, 171); - rt2800_bbp_write(rt2x00dev, 196, 0x10); - } else { - rt2800_bbp_write(rt2x00dev, 91, 0x06); - rt2800_bbp_write(rt2x00dev, 95, 0x9A); - rt2800_bbp_write(rt2x00dev, 195, 128); - rt2800_bbp_write(rt2x00dev, 196, 0xE0); - rt2800_bbp_write(rt2x00dev, 195, 170); - rt2800_bbp_write(rt2x00dev, 196, 0x30); - rt2800_bbp_write(rt2x00dev, 195, 171); - rt2800_bbp_write(rt2x00dev, 196, 0x30); - } - - if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 75, 0x68); - rt2800_bbp_write(rt2x00dev, 76, 0x4C); - rt2800_bbp_write(rt2x00dev, 79, 0x1C); - rt2800_bbp_write(rt2x00dev, 80, 0x0C); - rt2800_bbp_write(rt2x00dev, 82, 0xB6); - } - - /* On 11A, We should delay and wait RF/BBP to be stable - * and the appropriate time should be 1000 micro seconds - * 2005/06/05 - On 11G, we also need this delay time. - * Otherwise it's difficult to pass the WHQL. - */ - usleep_range(1000, 1500); - } } EXPORT_SYMBOL_GPL(rt2800_vco_calibration); @@ -5845,6 +5785,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u16 eeprom; + u8 bbp; unsigned int i; int ret; @@ -5854,6 +5795,19 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) if (ret) return ret; + if (rt2x00_rt(rt2x00dev, RT6352)) { + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x01); + + bbp = rt2800_bbp_read(rt2x00dev, 21); + bbp |= 0x01; + rt2800_bbp_write(rt2x00dev, 21, bbp); + bbp = rt2800_bbp_read(rt2x00dev, 21); + bbp &= (~0x01); + rt2800_bbp_write(rt2x00dev, 21, bbp); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00); + } + rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); @@ -6007,6 +5961,14 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1); rt2x00_set_field32(®, TX_ALC_CFG_1_ROS_BUSY_EN, 0); rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); + + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_20M1S, 0x77754433); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_20M2S, 0x77765543); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_40M1S, 0x77765544); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_40M2S, 0x77765544); + + rt2800_register_write(rt2x00dev, HT_FBK_TO_LEGACY, 0x1010); + } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -7225,6 +7187,8 @@ static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64); rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 84, 0x19); } static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) @@ -8695,7 +8659,7 @@ static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4); 
rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4); - rt2800_bbp_write(rt2x00dev, 158, 141); + rt2800_bbp_write(rt2x00dev, 158, 140); bbpreg = rt2800_bbp_read(rt2x00dev, 159); bbpreg = bbpreg & (~0x40); rt2800_bbp_write(rt2x00dev, 159, bbpreg); @@ -9700,9 +9664,6 @@ static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev) rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx], rfvga_gain_table[vga_gain[ch_idx]]); - - if (vga_gain[ch_idx] < 0) - vga_gain[ch_idx] = 0; } rfvalue = rfvga_gain_table[vga_gain[ch_idx]]; @@ -10339,6 +10300,128 @@ do_cal: rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0); } +static void rt2800_restore_rf_bbp_rt6352(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0); + rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16); + rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23); + rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xd3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xb3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xd5); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6c); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xfc); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1f); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xff); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1c); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6b); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xf7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_bbp_write(rt2x00dev, 75, 0x60); + rt2800_bbp_write(rt2x00dev, 76, 0x44); + rt2800_bbp_write(rt2x00dev, 79, 0x1c); + rt2800_bbp_write(rt2x00dev, 80, 0x0c); + rt2800_bbp_write(rt2x00dev, 82, 0xB6); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x3630363a); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6c6c666c); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6c6c666c); + } +} + +static void rt2800_calibration_rt6352(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + if (rt2x00_has_cap_external_pa(rt2x00dev) || + rt2x00_has_cap_external_lna_bg(rt2x00dev)) + rt2800_restore_rf_bbp_rt6352(rt2x00dev); + + rt2800_r_calibration(rt2x00dev); + rt2800_rf_self_txdc_cal(rt2x00dev); + rt2800_rxdcoc_calibration(rt2x00dev); + rt2800_bw_filter_calibration(rt2x00dev, true); + rt2800_bw_filter_calibration(rt2x00dev, false); + rt2800_loft_iq_calibration(rt2x00dev); + + /* missing DPD calibration for internal PA devices */ + + rt2800_rxdcoc_calibration(rt2x00dev); + rt2800_rxiq_calibration(rt2x00dev); + + if (!rt2x00_has_cap_external_pa(rt2x00dev) && + !rt2x00_has_cap_external_lna_bg(rt2x00dev)) + return; + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + reg = rt2800_register_read(rt2x00dev, RF_CONTROL3); + reg |= 0x00000101; + rt2800_register_write(rt2x00dev, RF_CONTROL3, reg); + + reg = rt2800_register_read(rt2x00dev, RF_BYPASS3); + reg |= 0x00000101; + rt2800_register_write(rt2x00dev, RF_BYPASS3, reg); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x66); + 
rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x42); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) + rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00); + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_bbp_write(rt2x00dev, 75, 0x68); + rt2800_bbp_write(rt2x00dev, 76, 0x4c); + rt2800_bbp_write(rt2x00dev, 79, 0x1c); + rt2800_bbp_write(rt2x00dev, 80, 0x0c); + rt2800_bbp_write(rt2x00dev, 82, 0xb6); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x36303636); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6c6c6b6c); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6c6c6b6c); + } +} + static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) { /* Initialize RF central register to default value */ @@ -10603,13 +10686,8 @@ static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C); - rt2800_r_calibration(rt2x00dev); - rt2800_rf_self_txdc_cal(rt2x00dev); - rt2800_rxdcoc_calibration(rt2x00dev); - rt2800_bw_filter_calibration(rt2x00dev, true); - rt2800_bw_filter_calibration(rt2x00dev, false); - rt2800_loft_iq_calibration(rt2x00dev); - rt2800_rxiq_calibration(rt2x00dev); + /* Do calibration and init PA/LNA */ + rt2800_calibration_rt6352(rt2x00dev); } static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index 862098f753..5323acff96 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -760,6 +760,9 @@ int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); + if (rt2x00_rt(rt2x00dev, RT6352)) + return 0; + reg = 0; rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 07a6a5a9ce..aaaf993319 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1262,6 +1262,12 @@ rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev) return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG); } +static inline bool +rt2x00_has_cap_external_pa(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_PA_TX0); +} + static inline bool rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 
b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 9a9cfd0ce4..9e7d9dbe95 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -101,6 +101,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); + rt2x00queue_stop_queue(rt2x00dev->bcn); /* * Disable radio. @@ -533,7 +534,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, */ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) - ieee80211_tx_status(rt2x00dev->hw, entry->skb); + ieee80211_tx_status_skb(rt2x00dev->hw, entry->skb); else ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); } else { @@ -1286,6 +1287,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; + rt2x00dev->intf_beaconing = 0; /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); @@ -1312,6 +1314,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; + rt2x00dev->intf_beaconing = 0; } static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 4202c65177..75fda72c14 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -598,6 +598,17 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, */ if (changes & BSS_CHANGED_BEACON_ENABLED) { mutex_lock(&intf->beacon_skb_mutex); + + /* + * Clear the 'enable_beacon' flag and clear beacon because + * the beacon queue has been stopped after hardware reset. 
+ */ + if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) && + intf->enable_beacon) { + intf->enable_beacon = false; + rt2x00queue_clear_beacon(rt2x00dev, vif); + } + if (!bss_conf->enable_beacon && intf->enable_beacon) { rt2x00dev->intf_beaconing--; intf->enable_beacon = false; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 5d102a1246..180907319e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -7500,6 +7500,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, case 0x8179: case 0xb711: case 0xf192: + case 0x2005: untested = 0; break; } @@ -7800,6 +7801,7 @@ static const struct usb_device_id dev_table[] = { /* Asus USB-N13 rev C1 */ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x18f1, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192fu_fops}, +/* EDIMAX EW-7722UTn V3 */ {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb722, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192fu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff), @@ -7959,6 +7961,18 @@ static const struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192eu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, +/* D-Link DWA-131 rev C1 */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3312, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +/* TP-Link TL-WN8200ND V2 */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0126, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +/* Mercusys MW300UM */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0100, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, +/* Mercusys MW300UH */ +{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0104, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, #endif { } }; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 807a53a973..7ce37fb4fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1317,12 +1317,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) struct rtl_priv *rtlpriv = rtl_priv(hw); __le16 fc = rtl_get_fc(skb); - if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } if (ieee80211_is_auth(fc)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 3835b639d4..69e97647e3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -662,13 +662,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) if (mac->act_scanning) mac->n_channels++; - if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - !mac->act_scanning) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } - /* *because we should back channel to *current_network.chan in scanning, @@ -1197,10 +1190,6 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->vendor = PEER_UNKNOWN; mac->mode = 0; - if (rtlpriv->dm.supp_phymode_switch) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - 
rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG, "BSS_CHANGED_UN_ASSOC\n"); } @@ -1464,11 +1453,6 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv, 1); - if (rtlpriv->dm.supp_phymode_switch) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } - if (mac->link_state == MAC80211_LINKED) { rtl_lps_leave(hw, true); mac->link_state = MAC80211_LINKED_SCANNING; @@ -1897,7 +1881,7 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) /*this is wrong, fill_tx_cmddesc needs update*/ pdesc = &ring->desc[0]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 629c03271b..6241e4fed4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -681,25 +681,10 @@ void rtl_swlps_wq_callback(struct work_struct *work) ps_work.work); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); - bool ps = false; - - ps = (hw->conf.flags & IEEE80211_CONF_PS); /* we can sleep after ps null send ok */ - if (rtlpriv->psc.state_inap) { + if (rtlpriv->psc.state_inap) rtl_swlps_rf_sleep(hw); - - if (rtlpriv->psc.state && !ps) { - rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies - - rtlpriv->psc.last_action); - } - - if (ps) - rtlpriv->psc.last_slept = jiffies; - - rtlpriv->psc.last_action = jiffies; - rtlpriv->psc.state = ps; - } } static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 58b1a46066..27f6c35ba0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -433,14 +433,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 65ebe52883..d094163a9a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -665,9 +665,8 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -687,8 +686,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index e17f70b4d1..aae654b0e3 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -797,6 +797,5 @@ bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 049c4fe9ee..0bc915723b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -208,14 +208,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 5376bb3425..50e139186a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -518,9 +518,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -540,9 +539,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); set_tx_desc_seq(pdesc, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index b45b05a6a5..f3ffe3d988 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -527,6 +527,5 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index a040c07791..5ec0eb8773 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1539,7 +1539,7 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) * if its "here". 
* * This is maybe necessary: - * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb); + * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, skb); */ dev_kfree_skb(skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index e6403d4c93..20b4aac696 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -102,7 +102,6 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .set_hw_reg = rtl92cu_set_hw_reg, .update_rate_tbl = rtl92cu_update_hal_rate_tbl, .fill_tx_desc = rtl92cu_tx_fill_desc, - .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, .query_rx_desc = rtl92cu_rx_query_desc, .set_channel_access = rtl92cu_update_channel_access_setting, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index b70767e72f..2f44c8aa60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -600,35 +600,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n"); } -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc8, - u32 buffer_len, bool is_pspoll) -{ - __le32 *pdesc = (__le32 *)pdesc8; - - /* Clear all status */ - memset(pdesc, 0, RTL_TX_HEADER_SIZE); - set_tx_desc_first_seg(pdesc, 1); /* bFirstSeg; */ - set_tx_desc_last_seg(pdesc, 1); /* bLastSeg; */ - set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ - set_tx_desc_pkt_size(pdesc, buffer_len); /* Buffer size + command hdr */ - set_tx_desc_queue_sel(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ - /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error - * vlaue by Hw. */ - if (is_pspoll) { - set_tx_desc_nav_use_hdr(pdesc, 1); - } else { - set_tx_desc_hwseq_en(pdesc, 1); /* Hw set sequence number */ - set_tx_desc_pkt_id(pdesc, BIT(3)); /* set bit3 to 1. 
*/ - } - set_tx_desc_use_rate(pdesc, 1); /* use data rate which is set by Sw */ - set_tx_desc_own(pdesc, 1); - set_tx_desc_tx_rate(pdesc, DESC_RATE1M); - _rtl_tx_desc_checksum(pdesc); -} - -void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 fw_queue = QSLT_BEACON; @@ -637,8 +610,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, __le32 *pdesc = (__le32 *)pdesc8; memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); + set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); set_tx_desc_seq(pdesc, 0); set_tx_desc_linip(pdesc, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 171fe39dfb..5f81cab205 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -394,10 +394,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc); -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc, - u32 buffer_len, bool ispspoll); -void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool b_firstseg, - bool b_lastseg, struct sk_buff *skb); +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index 6cc9c7649e..cf4aca83bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -592,32 +592,18 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + const u32 edca_be_ul = 0x5ea42b; + const u32 edca_be_dl = 0x5ea42b; static u64 last_txok_cnt; static u64 last_rxok_cnt; u64 cur_txok_cnt; u64 cur_rxok_cnt; - u32 edca_be_ul = 0x5ea42b; - u32 edca_be_dl = 0x5ea42b; if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; goto exit; } - /* Enable BEQ TxOP limit configuration in wireless G-mode. */ - /* To check whether we shall force turn on TXOP configuration. */ - if ((!rtlpriv->dm.disable_framebursting) && - (rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION || - rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION || - rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) { - /* Force TxOP limit to 0x005e for UL. */ - if (!(edca_be_ul & 0xffff0000)) - edca_be_ul |= 0x005e0000; - /* Force TxOP limit to 0x005e for DL. 
*/ - if (!(edca_be_dl & 0xffff0000)) - edca_be_dl |= 0x005e0000; - } - if ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting)) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 9ddb847878..e1fb299628 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -469,7 +469,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, pdesc = &ring->desc[idx]; /* discard output from call below */ rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN); - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 31a18bbfac..743ac6871b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -225,13 +225,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE: { u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | min_spacing_to_set); *val = min_spacing_to_set; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index c09c0c3126..02ac69c08e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -655,9 +655,8 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -678,8 +677,7 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, return; } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); /* 5G have no CCK rate * Caution: The macros below are multi-line expansions. 
* The braces are needed no matter what checkpatch says diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index d01578875c..2992668c15 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -564,7 +564,6 @@ bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c index 997ff115b9..5a828a934f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c @@ -936,8 +936,7 @@ void rtl92ee_dm_init(struct ieee80211_hw *hw) static void rtl92ee_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_sta_info *drv_priv; - u8 cnt = 0; + u8 cnt; rtlpriv->dm.one_entry_only = false; @@ -951,9 +950,7 @@ static void rtl92ee_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { - cnt++; - } + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 616a47d8d9..011ce82efe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -199,7 +199,6 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .get_hw_reg = rtl92ee_get_hw_reg, .set_hw_reg = rtl92ee_set_hw_reg, .update_rate_tbl = rtl92ee_update_hal_rate_tbl, - .pre_fill_tx_bd_desc = rtl92ee_pre_fill_tx_bd_desc, .rx_desc_buff_remained_cnt = rtl92ee_rx_desc_buff_remained_cnt, .rx_check_dma_ok = rtl92ee_rx_check_dma_ok, .fill_tx_desc = rtl92ee_tx_fill_desc, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index a182cdeb58..16589e1849 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -550,9 +550,11 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) return point_diff; } -void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, - u8 *tx_bd_desc8, u8 *desc8, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr) +static void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, + u8 *tx_bd_desc8, u8 *desc8, + u8 queue_index, + struct sk_buff *skb, + dma_addr_t addr) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -827,9 +829,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -846,8 +847,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, } 
clear_pci_tx_desc_content(pdesc, txdesc_len); - if (firstseg) - set_tx_desc_offset(pdesc, txdesc_len); + set_tx_desc_offset(pdesc, txdesc_len); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 967cef3a9c..4c6cf4f16f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -720,9 +720,6 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc, u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index); u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index); -void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, - u8 *tx_bd_desc, u8 *desc, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr); void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, @@ -743,6 +740,5 @@ u64 rtl92ee_get_desc(struct ieee80211_hw *hw, bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index f570495af0..579b1789d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -122,7 +122,7 @@ static bool _rtl92s_cmd_send_packet(struct ieee80211_hw *hw, idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; pdesc = &ring->desc[idx]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index a5853a170b..f104cdb649 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -492,7 +492,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, } void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, - bool firstseg, bool lastseg, struct sk_buff *skb) + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h index 90aa12fc6a..40291a6a15 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h @@ -10,8 +10,8 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); -void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, - bool lastseg, struct sk_buff *skb); +void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + struct sk_buff *skb); bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 53af0d209b..b34dffc6a3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -1122,7 +1122,7 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) /* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + "[BTCoex], BT btInqPageStartTime = 0x%lx, btTxRxCntLvl = %d\n", hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); if ((hal_coex_8723.bt_inq_page_start_time) || (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) { @@ -1335,7 +1335,7 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) btdm8723.dec_bt_pwr = true; rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + "[BTCoex], BT btInqPageStartTime = 0x%lx, btTxRxCntLvl = %d\n", hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); if ((hal_coex_8723.bt_inq_page_start_time) || @@ -1358,9 +1358,8 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 cur_time; + unsigned long cur_time = jiffies; - cur_time = jiffies; if (hal_coex_8723.c2h_bt_inquiry_page) { /* bt inquiry or page is started. */ if (hal_coex_8723.bt_inq_page_start_time == 0) { @@ -1368,18 +1367,17 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) BT_COEX_STATE_BT_INQ_PAGE; hal_coex_8723.bt_inq_page_start_time = cur_time; rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page is started at time : 0x%x\n", + "[BTCoex], BT Inquiry/page is started at time : 0x%lx\n", hal_coex_8723.bt_inq_page_start_time); } } rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n", + "[BTCoex], BT Inquiry/page started time : 0x%lx, cur_time : 0x%lx\n", hal_coex_8723.bt_inq_page_start_time, cur_time); if (hal_coex_8723.bt_inq_page_start_time) { - if ((((long)cur_time - - (long)hal_coex_8723.bt_inq_page_start_time) / HZ) - >= 10) { + if (jiffies_to_msecs(cur_time - + hal_coex_8723.bt_inq_page_start_time) >= 10000) { rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT Inquiry/page >= 10sec!!!\n"); hal_coex_8723.bt_inq_page_start_time = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index d26d4c4314..6991713a66 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -212,14 +212,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index fe9b407dc2..71e29b103d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -49,7 +49,7 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw, rfpath, regaddr); } - bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -80,7 +80,7 @@ void 
rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); @@ -89,7 +89,7 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw, rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data); } else { if (bitmask != RFREG_OFFSET_MASK) { - bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 7f294e6989..d9823ddab7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -519,9 +519,8 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -541,8 +540,7 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 2d25f62a4d..f352fddfff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -530,6 +530,5 @@ bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index c3c990cc03..c53f951448 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -1210,8 +1210,7 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 cnt = 0; - struct rtl_sta_info *drv_priv; + u8 cnt; rtlpriv->dm.one_entry_only = false; @@ -1225,9 +1224,7 @@ static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { - cnt++; - } + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 1557564455..0e77de1baa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -468,15 +468,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case 
HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; - mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | min_spacing_to_set); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 2b9313cb93..094cb36153 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -41,7 +41,7 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, spin_lock(&rtlpriv->locks.rf_lock); original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -68,7 +68,7 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path, if (bitmask != RFREG_OFFSET_MASK) { original_value = rtl8723_phy_rf_serial_read(hw, path, regaddr); - bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 24ef7cc52e..8b6352f7f9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -585,7 +585,6 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, } void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, - bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 174aca20c7..da027f915c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -642,6 +642,5 @@ bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index f3fe16798c..76b5395539 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -827,8 +827,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw) static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 cnt = 0; - struct rtl_sta_info *drv_priv; + u8 cnt; rtlpriv->dm.tx_rate = 0xff; @@ -844,8 +843,7 @@ static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) - cnt++; + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 3f8f6da33b..1633328bc3 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -546,14 +546,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index d7cb3319d8..bd71592fe2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -828,9 +828,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index a9ed6fd410..1155365348 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -648,6 +648,5 @@ bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 8cbf3fb388..5d842cc394 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1597,7 +1597,7 @@ struct bt_coexist_8723 { u8 c2h_bt_info; bool c2h_bt_info_req_sent; bool c2h_bt_inquiry_page; - u32 bt_inq_page_start_time; + unsigned long bt_inq_page_start_time; u8 bt_retry_cnt; u8 c2h_bt_info_original; u8 bt_inquiry_page_cnt; @@ -2032,19 +2032,15 @@ struct rtl_ps_ctl { /* for SW LPS*/ bool sw_ps_enabled; - bool state; bool state_inap; bool multi_buffered; u16 nullfunc_seq; unsigned int dtim_counter; - unsigned int sleep_ms; unsigned long last_sleep_jiffies; unsigned long last_awake_jiffies; unsigned long last_delaylps_stamp_jiffies; unsigned long last_dtim; unsigned long last_beacon; - unsigned long last_action; - unsigned long last_slept; /*For P2P PS */ struct rtl_p2p_ps_info p2p_ps_info; @@ -2231,9 +2227,6 @@ struct rtl_hal_ops { void (*update_rate_tbl)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 rssi_leve, bool update_bw); - void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc, - u8 *desc, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr); void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level); u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw, u8 queue_index); @@ -2246,10 +2239,7 @@ struct rtl_hal_ops { struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); - void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc, - u32 buffer_len, bool bsspspoll); void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); void 
(*fill_tx_special_desc)(struct ieee80211_hw *hw, u8 *pdesc, u8 *pbd_desc, @@ -2285,7 +2275,6 @@ struct rtl_hal_ops { void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data); void (*linked_set_reg)(struct ieee80211_hw *hw); - void (*chk_switch_dmdp)(struct ieee80211_hw *hw); void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw); bool (*phy_rf6052_config)(struct ieee80211_hw *hw); void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw, @@ -2708,7 +2697,7 @@ struct rtl_c2hcmd { struct rtl_bssid_entry { struct list_head list; u8 bssid[ETH_ALEN]; - u32 age; + unsigned long age; }; struct rtl_scan_list { diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index a9149c6c2b..a03ced11bb 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -48,11 +48,23 @@ void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, #define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a) +static inline bool rtw_dbg_is_enabled(struct rtw_dev *rtwdev, + enum rtw_debug_mask mask) +{ + return !!(rtw_debug_mask & mask); +} + #else static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, const char *fmt, ...) {} +static inline bool rtw_dbg_is_enabled(struct rtw_dev *rtwdev, + enum rtw_debug_mask mask) +{ + return false; +} + #endif /* CONFIG_RTW88_DEBUG */ #define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a) diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index a1b674e3ca..acd78311c8 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -17,6 +17,79 @@ #include "phy.h" #include "mac.h" +static const struct rtw_hw_reg_desc fw_h2c_regs[] = { + {REG_FWIMR, MASKDWORD, "FWIMR"}, + {REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "FWIMR enable"}, + {REG_FWISR, MASKDWORD, "FWISR"}, + {REG_FWISR, BIT_FS_H2CCMD_INT, "FWISR enable"}, + {REG_HMETFR, BIT_INT_BOX_ALL, "BoxBitMap"}, + {REG_HMEBOX0, MASKDWORD, "MSG 0"}, + {REG_HMEBOX0_EX, MASKDWORD, "MSG_EX 0"}, + {REG_HMEBOX1, MASKDWORD, "MSG 1"}, + {REG_HMEBOX1_EX, MASKDWORD, "MSG_EX 1"}, + {REG_HMEBOX2, MASKDWORD, "MSG 2"}, + {REG_HMEBOX2_EX, MASKDWORD, "MSG_EX 2"}, + {REG_HMEBOX3, MASKDWORD, "MSG 3"}, + {REG_HMEBOX3_EX, MASKDWORD, "MSG_EX 3"}, + {REG_FT1IMR, MASKDWORD, "FT1IMR"}, + {REG_FT1IMR, BIT_FS_H2C_CMD_OK_INT_EN, "FT1IMR enable"}, + {REG_FT1ISR, MASKDWORD, "FT1ISR"}, + {REG_FT1ISR, BIT_FS_H2C_CMD_OK_INT, "FT1ISR enable "}, +}; + +static const struct rtw_hw_reg_desc fw_c2h_regs[] = { + {REG_FWIMR, MASKDWORD, "FWIMR"}, + {REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "CPWM"}, + {REG_FWIMR, BIT_FS_HRCV_INT_EN, "HRECV"}, + {REG_FWISR, MASKDWORD, "FWISR"}, + {REG_FWISR, BIT_FS_H2CCMD_INT, "CPWM"}, + {REG_FWISR, BIT_FS_HRCV_INT, "HRECV"}, + {REG_CPWM, MASKDWORD, "REG_CPWM"}, +}; + +static const struct rtw_hw_reg_desc fw_core_regs[] = { + {REG_ARFR2_V1, MASKDWORD, "EPC"}, + {REG_ARFRH2_V1, MASKDWORD, "BADADDR"}, + {REG_ARFR3_V1, MASKDWORD, "CAUSE"}, + {REG_ARFR3_V1, BIT_EXC_CODE, "ExcCode"}, + {REG_ARFRH3_V1, MASKDWORD, "Status"}, + {REG_ARFR4, MASKDWORD, "SP"}, + {REG_ARFRH4, MASKDWORD, "RA"}, + {REG_FW_DBG6, MASKDWORD, "DBG 6"}, + {REG_FW_DBG7, MASKDWORD, "DBG 7"}, +}; + +static void _rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev, + const struct rtw_hw_reg_desc regs[], u32 size) +{ + const struct rtw_hw_reg_desc *reg; + u32 val; + int i; + + for (i = 0; i < size; i++) { + reg = &regs[i]; + val = rtw_read32_mask(rtwdev, reg->addr, 
reg->mask); + + rtw_dbg(rtwdev, RTW_DBG_FW, "[%s]addr:0x%x mask:0x%x value:0x%x\n", + reg->desc, reg->addr, reg->mask, val); + } +} + +void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev) +{ + int i; + + if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_FW)) + return; + + _rtw_fw_dump_dbg_info(rtwdev, fw_h2c_regs, ARRAY_SIZE(fw_h2c_regs)); + _rtw_fw_dump_dbg_info(rtwdev, fw_c2h_regs, ARRAY_SIZE(fw_c2h_regs)); + for (i = 0 ; i < RTW_DEBUG_DUMP_TIMES; i++) { + rtw_dbg(rtwdev, RTW_DBG_FW, "Firmware Coredump %dth\n", i + 1); + _rtw_fw_dump_dbg_info(rtwdev, fw_core_regs, ARRAY_SIZE(fw_core_regs)); + } +} + static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb) { @@ -349,6 +422,7 @@ static void rtw_fw_send_h2c_command_register(struct rtw_dev *rtwdev, if (ret) { rtw_err(rtwdev, "failed to send h2c command\n"); + rtw_fw_dump_dbg_info(rtwdev); return; } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 43ccdf9965..84e47c71ea 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -44,6 +44,8 @@ #define RTW_OLD_PROBE_PG_CNT 2 #define RTW_PROBE_PG_CNT 4 +#define RTW_DEBUG_DUMP_TIMES 10 + enum rtw_c2h_cmd_id { C2H_CCX_TX_RPT = 0x03, C2H_BT_INFO = 0x09, @@ -808,6 +810,7 @@ static inline bool rtw_fw_feature_ext_check(struct rtw_fw_state *fw, return !!(fw->feature_ext & feature); } +void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev); void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, struct sk_buff *skb); void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index c42ef8294d..b6bfd4c02e 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -342,8 +342,10 @@ enum rtw_regulatory_domains { RTW_REGD_UKRAINE = 7, RTW_REGD_MEXICO = 8, RTW_REGD_CN = 9, - RTW_REGD_WW, + RTW_REGD_QATAR = 10, + RTW_REGD_UK = 11, + RTW_REGD_WW, RTW_REGD_MAX }; @@ -522,6 +524,12 @@ struct rtw_hw_reg { u32 mask; }; +struct rtw_hw_reg_desc { + u32 addr; + u32 mask; + const char *desc; +}; + struct rtw_ltecoex_addr { u32 ctrl; u32 wdata; diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 07e8cbd436..add5a20b84 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -104,6 +104,7 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter) */ WARN(1, "firmware failed to ack driver for %s Deep Power mode\n", enter ? 
"entering" : "leaving"); + rtw_fw_dump_dbg_info(rtwdev); } } EXPORT_SYMBOL(rtw_power_mode_change); @@ -164,6 +165,7 @@ static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev) if (ret) { rtw_write32_clr(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN); rtw_warn(rtwdev, "firmware failed to leave lps state\n"); + rtw_fw_dump_dbg_info(rtwdev); } } diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 7c6c11d50f..1634f03784 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -224,12 +224,25 @@ #define REG_RXFF_BNDY 0x011C #define REG_FE1IMR 0x0120 #define BIT_FS_RXDONE BIT(16) +#define REG_CPWM 0x012C +#define REG_FWIMR 0x0130 +#define BIT_FS_H2CCMD_INT_EN BIT(4) +#define BIT_FS_HRCV_INT_EN BIT(5) +#define REG_FWISR 0x0134 +#define BIT_FS_H2CCMD_INT BIT(4) +#define BIT_FS_HRCV_INT BIT(5) #define REG_PKTBUF_DBG_CTRL 0x0140 #define REG_C2HEVT 0x01A0 #define REG_MCUTST_1 0x01C0 #define REG_MCUTST_II 0x01C4 #define REG_WOWLAN_WAKE_REASON 0x01C7 #define REG_HMETFR 0x01CC +#define BIT_INT_BOX0 BIT(0) +#define BIT_INT_BOX1 BIT(1) +#define BIT_INT_BOX2 BIT(2) +#define BIT_INT_BOX3 BIT(3) +#define BIT_INT_BOX_ALL (BIT_INT_BOX0 | BIT_INT_BOX1 | BIT_INT_BOX2 | \ + BIT_INT_BOX3) #define REG_HMEBOX0 0x01D0 #define REG_HMEBOX1 0x01D4 #define REG_HMEBOX2 0x01D8 @@ -338,6 +351,11 @@ #define BIT_EN_GNT_BT_AWAKE BIT(3) #define BIT_EN_EOF_V1 BIT(2) #define REG_DATA_SC 0x0483 +#define REG_ARFR2_V1 0x048C +#define REG_ARFRH2_V1 0x0490 +#define REG_ARFR3_V1 0x0494 +#define BIT_EXC_CODE GENMASK(6, 2) +#define REG_ARFRH3_V1 0x0498 #define REG_ARFR4 0x049C #define BIT_WL_RFK BIT(0) #define REG_ARFRH4 0x04A0 @@ -548,11 +566,16 @@ #define REG_H2C_PKT_READADDR 0x10D0 #define REG_H2C_PKT_WRITEADDR 0x10D4 +#define REG_FW_DBG6 0x10F8 #define REG_FW_DBG7 0x10FC #define FW_KEY_MASK 0xffffff00 #define REG_CR_EXT 0x1100 +#define REG_FT1IMR 0x1138 +#define BIT_FS_H2C_CMD_OK_INT_EN BIT(25) +#define REG_FT1ISR 0x113c +#define BIT_FS_H2C_CMD_OK_INT BIT(25) #define REG_DDMA_CH0SA 0x1200 #define REG_DDMA_CH0DA 0x1204 #define REG_DDMA_CH0CTRL 0x1208 diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c index 2f547cbcf6..7f3b2ea3f2 100644 --- a/drivers/net/wireless/realtek/rtw88/regd.c +++ b/drivers/net/wireless/realtek/rtw88/regd.c @@ -70,16 +70,16 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC), - COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CC", RTW_REGD_ACMA, RTW_REGD_ACMA), COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CL", RTW_REGD_CHILE, RTW_REGD_CHILE), COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("CN", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CN", RTW_REGD_CN, RTW_REGD_CN), COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI), @@ -106,7 +106,7 @@ static const struct rtw_regulatory rtw_reg_map[] = { 
 	COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI),
-	COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+	COUNTRY_REGD_ENT("GB", RTW_REGD_UK, RTW_REGD_UK),
 	COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI),
@@ -214,7 +214,7 @@ static const struct rtw_regulatory rtw_reg_map[] = {
 	COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC),
-	COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+	COUNTRY_REGD_ENT("QA", RTW_REGD_QATAR, RTW_REGD_QATAR),
 	COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI),
@@ -234,7 +234,7 @@ static const struct rtw_regulatory rtw_reg_map[] = {
 	COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC),
-	COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC),
+	COUNTRY_REGD_ENT("ST", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
@@ -253,7 +253,7 @@ static const struct rtw_regulatory rtw_reg_map[] = {
 	COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW),
 	COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
-	COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+	COUNTRY_REGD_ENT("UA", RTW_REGD_UKRAINE, RTW_REGD_UKRAINE),
 	COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI),
 	COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC),
 	COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC),
@@ -502,6 +502,14 @@ u8 rtw_regd_get(struct rtw_dev *rtwdev)
 }
 EXPORT_SYMBOL(rtw_regd_get);
 
+bool rtw_regd_srrc(struct rtw_dev *rtwdev)
+{
+	struct rtw_regd *regd = &rtwdev->regd;
+
+	return rtw_reg_match(regd->regulatory, "CN");
+}
+EXPORT_SYMBOL(rtw_regd_srrc);
+
 struct rtw_regd_alternative_t {
 	bool set;
 	u8 alt;
@@ -519,6 +527,8 @@ rtw_regd_alt[RTW_REGD_MAX] = {
 	DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI),
 	DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC),
 	DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI),
+	DECL_REGD_ALT(RTW_REGD_QATAR, RTW_REGD_ETSI),
+	DECL_REGD_ALT(RTW_REGD_UK, RTW_REGD_ETSI),
 };
 
 bool rtw_regd_has_alt(u8 regd, u8 *regd_alt)
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
index 34cb13d0cd..3c5a6fd8e6 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.h
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -68,4 +68,6 @@ int rtw_regd_init(struct rtw_dev *rtwdev);
 int rtw_regd_hint(struct rtw_dev *rtwdev);
 u8 rtw_regd_get(struct rtw_dev *rtwdev);
 bool rtw_regd_has_alt(u8 regd, u8 *regd_alt);
+bool rtw_regd_srrc(struct rtw_dev *rtwdev);
+
 #endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index adf224618a..429bb420b0 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -381,6 +381,65 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
 	}
 }
 
+static void rtw8821c_cck_tx_filter_srrc(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+	struct rtw_hal *hal = &rtwdev->hal;
+
+	if (channel == 14) {
+		rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xe82c);
+		rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c);
+		rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
+		rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
+
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000);
+	} else if (channel == 13 ||
+		   (channel == 11 && bw == RTW_CHANNEL_WIDTH_40)) {
+		rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xf8fe);
+		rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x64b80c1c);
+		rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x8810);
+		rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x01235667);
+
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00027);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00027);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00029);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00026);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000);
+	} else {
+		rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xe82c);
+		rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
+				 hal->ch_param[0]);
+		rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
+				 hal->ch_param[1] & MASKLWORD);
+		rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
+				 hal->ch_param[2]);
+
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000);
+		rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000);
+	}
+}
+
 static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
 				    u8 primary_ch_idx)
 {
@@ -395,6 +454,13 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
 		rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x0);
 		rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a);
+
+		if (rtw_regd_srrc(rtwdev)) {
+			rtw8821c_cck_tx_filter_srrc(rtwdev, channel, bw);
+			goto set_bw;
+		}
+
+		/* CCK TX filter parameters for default case */
 		if (channel == 14) {
 			rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c);
 			rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
 			rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
@@ -430,6 +496,7 @@ static void rtw8821c_set_channel_bb(struct
rtw_dev *rtwdev, u8 channel, u8 bw, rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412); } +set_bw: switch (bw) { case RTW_CHANNEL_WIDTH_20: default: diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index fcff31688c..91ed921407 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -238,6 +238,7 @@ extern const struct rtw_chip_info rtw8821c_hw_spec; #define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 #define REG_PWRTH 0xa08 +#define REG_CCA_FLTR 0xa20 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 #define REG_FA_CCK 0xa5c diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c index 6c82c43834..0393b9a0c1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c @@ -6013,996 +6013,1492 @@ RTW_DECL_TABLE_RF_RADIO(rtw8821c_rf_a, A); static const struct rtw_txpwr_lmt_cfg_pair rtw8821c_txpwr_lmt_type0[] = { { 0, 0, 0, 0, 1, 30, }, { 2, 0, 0, 0, 1, 30, }, - { 0, 0, 0, 0, 2, 32, }, - { 2, 0, 0, 0, 2, 30, }, - { 0, 0, 0, 0, 3, 32, }, - { 2, 0, 0, 0, 3, 30, }, - { 0, 0, 0, 0, 4, 32, }, - { 2, 0, 0, 0, 4, 30, }, - { 0, 0, 0, 0, 5, 32, }, - { 2, 0, 0, 0, 5, 30, }, - { 0, 0, 0, 0, 6, 32, }, - { 2, 0, 0, 0, 6, 30, }, - { 0, 0, 0, 0, 7, 32, }, - { 2, 0, 0, 0, 7, 30, }, - { 0, 0, 0, 0, 8, 32, }, - { 2, 0, 0, 0, 8, 30, }, - { 0, 0, 0, 0, 9, 32, }, - { 2, 0, 0, 0, 9, 30, }, - { 0, 0, 0, 0, 10, 32, }, - { 2, 0, 0, 0, 10, 30, }, - { 0, 0, 0, 0, 11, 32, }, - { 2, 0, 0, 0, 11, 30, }, - { 0, 0, 0, 0, 12, 24, }, - { 2, 0, 0, 0, 12, 30, }, - { 0, 0, 0, 0, 13, 16, }, - { 2, 0, 0, 0, 13, 30, }, - { 0, 0, 0, 0, 14, 63, }, - { 2, 0, 0, 0, 14, 63, }, - { 0, 0, 0, 1, 1, 30, }, - { 2, 0, 0, 1, 1, 30, }, - { 0, 0, 0, 1, 2, 32, }, - { 2, 0, 0, 1, 2, 30, }, - { 0, 0, 0, 1, 3, 34, }, - { 2, 0, 0, 1, 3, 30, }, - { 0, 0, 0, 1, 4, 34, }, - { 2, 0, 0, 1, 4, 30, }, - { 0, 0, 0, 1, 5, 34, }, - { 2, 0, 0, 1, 5, 30, }, - { 0, 0, 0, 1, 6, 34, }, - { 2, 0, 0, 1, 6, 30, }, - { 0, 0, 0, 1, 7, 34, }, - { 2, 0, 0, 1, 7, 30, }, - { 0, 0, 0, 1, 8, 34, }, - { 2, 0, 0, 1, 8, 30, }, - { 0, 0, 0, 1, 9, 34, }, - { 2, 0, 0, 1, 9, 30, }, - { 0, 0, 0, 1, 10, 32, }, - { 2, 0, 0, 1, 10, 30, }, - { 0, 0, 0, 1, 11, 30, }, - { 2, 0, 0, 1, 11, 30, }, - { 0, 0, 0, 1, 12, 28, }, - { 2, 0, 0, 1, 12, 30, }, - { 0, 0, 0, 1, 13, 16, }, - { 2, 0, 0, 1, 13, 30, }, - { 0, 0, 0, 1, 14, 63, }, - { 2, 0, 0, 1, 14, 63, }, - { 0, 0, 0, 2, 1, 26, }, - { 2, 0, 0, 2, 1, 30, }, - { 0, 0, 0, 2, 2, 30, }, - { 2, 0, 0, 2, 2, 30, }, - { 0, 0, 0, 2, 3, 32, }, - { 2, 0, 0, 2, 3, 30, }, - { 0, 0, 0, 2, 4, 34, }, - { 2, 0, 0, 2, 4, 30, }, - { 0, 0, 0, 2, 5, 34, }, - { 2, 0, 0, 2, 5, 30, }, - { 0, 0, 0, 2, 6, 34, }, - { 2, 0, 0, 2, 6, 30, }, - { 0, 0, 0, 2, 7, 34, }, - { 2, 0, 0, 2, 7, 30, }, - { 0, 0, 0, 2, 8, 34, }, - { 2, 0, 0, 2, 8, 30, }, - { 0, 0, 0, 2, 9, 32, }, - { 2, 0, 0, 2, 9, 30, }, - { 0, 0, 0, 2, 10, 30, }, - { 2, 0, 0, 2, 10, 30, }, - { 0, 0, 0, 2, 11, 28, }, - { 2, 0, 0, 2, 11, 30, }, - { 0, 0, 0, 2, 12, 26, }, - { 2, 0, 0, 2, 12, 30, }, - { 0, 0, 0, 2, 13, 12, }, - { 2, 0, 0, 2, 13, 30, }, - { 0, 0, 0, 2, 14, 63, }, - { 2, 0, 0, 2, 14, 63, }, - { 0, 0, 1, 2, 1, 63, }, - { 2, 0, 1, 2, 1, 63, }, - { 0, 0, 1, 2, 2, 63, }, - { 2, 0, 1, 2, 2, 63, }, - { 0, 0, 1, 2, 3, 26, }, - { 2, 0, 1, 2, 3, 30, }, - { 0, 0, 1, 2, 4, 26, }, - { 2, 0, 1, 2, 4, 30, }, - { 0, 0, 1, 2, 5, 30, }, - { 2, 0, 1, 2, 5, 30, }, - { 0, 0, 1, 2, 6, 30, 
}, - { 2, 0, 1, 2, 6, 30, }, - { 0, 0, 1, 2, 7, 30, }, - { 2, 0, 1, 2, 7, 30, }, - { 0, 0, 1, 2, 8, 26, }, - { 2, 0, 1, 2, 8, 30, }, - { 0, 0, 1, 2, 9, 26, }, - { 2, 0, 1, 2, 9, 30, }, - { 0, 0, 1, 2, 10, 28, }, - { 2, 0, 1, 2, 10, 30, }, - { 0, 0, 1, 2, 11, 20, }, - { 2, 0, 1, 2, 11, 30, }, - { 0, 0, 1, 2, 12, 63, }, - { 2, 0, 1, 2, 12, 63, }, - { 0, 0, 1, 2, 13, 63, }, - { 2, 0, 1, 2, 13, 63, }, - { 0, 0, 1, 2, 14, 63, }, - { 2, 0, 1, 2, 14, 63, }, - { 0, 1, 0, 1, 36, 31, }, - { 2, 1, 0, 1, 36, 32, }, - { 0, 1, 0, 1, 40, 33, }, - { 2, 1, 0, 1, 40, 32, }, - { 0, 1, 0, 1, 44, 33, }, - { 2, 1, 0, 1, 44, 32, }, - { 0, 1, 0, 1, 48, 31, }, - { 2, 1, 0, 1, 48, 32, }, - { 0, 1, 0, 1, 52, 33, }, - { 2, 1, 0, 1, 52, 32, }, - { 0, 1, 0, 1, 56, 33, }, - { 2, 1, 0, 1, 56, 32, }, - { 0, 1, 0, 1, 60, 33, }, - { 2, 1, 0, 1, 60, 32, }, - { 0, 1, 0, 1, 64, 30, }, - { 2, 1, 0, 1, 64, 32, }, - { 0, 1, 0, 1, 100, 30, }, - { 2, 1, 0, 1, 100, 32, }, - { 0, 1, 0, 1, 104, 33, }, - { 2, 1, 0, 1, 104, 32, }, - { 0, 1, 0, 1, 108, 33, }, - { 2, 1, 0, 1, 108, 32, }, - { 0, 1, 0, 1, 112, 33, }, - { 2, 1, 0, 1, 112, 32, }, - { 0, 1, 0, 1, 116, 33, }, - { 2, 1, 0, 1, 116, 32, }, - { 0, 1, 0, 1, 120, 33, }, - { 2, 1, 0, 1, 120, 32, }, - { 0, 1, 0, 1, 124, 33, }, - { 2, 1, 0, 1, 124, 32, }, - { 0, 1, 0, 1, 128, 33, }, - { 2, 1, 0, 1, 128, 32, }, - { 0, 1, 0, 1, 132, 33, }, - { 2, 1, 0, 1, 132, 32, }, - { 0, 1, 0, 1, 136, 33, }, - { 2, 1, 0, 1, 136, 32, }, - { 0, 1, 0, 1, 140, 31, }, - { 2, 1, 0, 1, 140, 32, }, - { 0, 1, 0, 1, 144, 30, }, - { 2, 1, 0, 1, 144, 63, }, - { 0, 1, 0, 1, 149, 33, }, - { 2, 1, 0, 1, 149, 63, }, - { 0, 1, 0, 1, 153, 33, }, - { 2, 1, 0, 1, 153, 63, }, - { 0, 1, 0, 1, 157, 33, }, - { 2, 1, 0, 1, 157, 63, }, - { 0, 1, 0, 1, 161, 33, }, - { 2, 1, 0, 1, 161, 63, }, - { 0, 1, 0, 1, 165, 33, }, - { 2, 1, 0, 1, 165, 63, }, - { 0, 1, 0, 2, 36, 30, }, - { 2, 1, 0, 2, 36, 32, }, - { 0, 1, 0, 2, 40, 33, }, - { 2, 1, 0, 2, 40, 32, }, - { 0, 1, 0, 2, 44, 33, }, - { 2, 1, 0, 2, 44, 32, }, - { 0, 1, 0, 2, 48, 33, }, - { 2, 1, 0, 2, 48, 32, }, - { 0, 1, 0, 2, 52, 33, }, - { 2, 1, 0, 2, 52, 32, }, - { 0, 1, 0, 2, 56, 33, }, - { 2, 1, 0, 2, 56, 32, }, - { 0, 1, 0, 2, 60, 33, }, - { 2, 1, 0, 2, 60, 32, }, - { 0, 1, 0, 2, 64, 30, }, - { 2, 1, 0, 2, 64, 32, }, - { 0, 1, 0, 2, 100, 30, }, - { 2, 1, 0, 2, 100, 32, }, - { 0, 1, 0, 2, 104, 33, }, - { 2, 1, 0, 2, 104, 32, }, - { 0, 1, 0, 2, 108, 33, }, - { 2, 1, 0, 2, 108, 32, }, - { 0, 1, 0, 2, 112, 33, }, - { 2, 1, 0, 2, 112, 32, }, - { 0, 1, 0, 2, 116, 33, }, - { 2, 1, 0, 2, 116, 32, }, - { 0, 1, 0, 2, 120, 33, }, - { 2, 1, 0, 2, 120, 32, }, - { 0, 1, 0, 2, 124, 33, }, - { 2, 1, 0, 2, 124, 32, }, - { 0, 1, 0, 2, 128, 33, }, - { 2, 1, 0, 2, 128, 32, }, - { 0, 1, 0, 2, 132, 33, }, - { 2, 1, 0, 2, 132, 32, }, - { 0, 1, 0, 2, 136, 33, }, - { 2, 1, 0, 2, 136, 32, }, - { 0, 1, 0, 2, 140, 29, }, - { 2, 1, 0, 2, 140, 32, }, - { 0, 1, 0, 2, 144, 27, }, - { 2, 1, 0, 2, 144, 63, }, - { 0, 1, 0, 2, 149, 33, }, - { 2, 1, 0, 2, 149, 63, }, - { 0, 1, 0, 2, 153, 33, }, - { 2, 1, 0, 2, 153, 63, }, - { 0, 1, 0, 2, 157, 33, }, - { 2, 1, 0, 2, 157, 63, }, - { 0, 1, 0, 2, 161, 33, }, - { 2, 1, 0, 2, 161, 63, }, - { 0, 1, 0, 2, 165, 33, }, - { 2, 1, 0, 2, 165, 63, }, - { 0, 1, 1, 2, 38, 22, }, - { 2, 1, 1, 2, 38, 32, }, - { 0, 1, 1, 2, 46, 32, }, - { 2, 1, 1, 2, 46, 32, }, - { 0, 1, 1, 2, 54, 32, }, - { 2, 1, 1, 2, 54, 32, }, - { 0, 1, 1, 2, 62, 23, }, - { 2, 1, 1, 2, 62, 32, }, - { 0, 1, 1, 2, 102, 21, }, - { 2, 1, 1, 2, 102, 32, }, - { 0, 1, 1, 2, 110, 32, }, - { 2, 1, 1, 2, 110, 32, }, - { 
0, 1, 1, 2, 118, 32, }, - { 2, 1, 1, 2, 118, 32, }, - { 0, 1, 1, 2, 126, 32, }, - { 2, 1, 1, 2, 126, 32, }, - { 0, 1, 1, 2, 134, 32, }, - { 2, 1, 1, 2, 134, 32, }, - { 0, 1, 1, 2, 142, 29, }, - { 2, 1, 1, 2, 142, 63, }, - { 0, 1, 1, 2, 151, 32, }, - { 2, 1, 1, 2, 151, 63, }, - { 0, 1, 1, 2, 159, 32, }, - { 2, 1, 1, 2, 159, 63, }, - { 0, 1, 2, 4, 42, 19, }, - { 2, 1, 2, 4, 42, 32, }, - { 0, 1, 2, 4, 58, 22, }, - { 2, 1, 2, 4, 58, 32, }, - { 0, 1, 2, 4, 106, 18, }, - { 2, 1, 2, 4, 106, 32, }, - { 0, 1, 2, 4, 122, 32, }, - { 2, 1, 2, 4, 122, 32, }, - { 0, 1, 2, 4, 138, 28, }, - { 2, 1, 2, 4, 138, 63, }, - { 0, 1, 2, 4, 155, 32, }, - { 2, 1, 2, 4, 155, 63, }, { 1, 0, 0, 0, 1, 34, }, { 3, 0, 0, 0, 1, 30, }, { 4, 0, 0, 0, 1, 34, }, { 5, 0, 0, 0, 1, 30, }, { 6, 0, 0, 0, 1, 30, }, { 7, 0, 0, 0, 1, 30, }, + { 8, 0, 0, 0, 1, 30, }, + { 9, 0, 0, 0, 1, 28, }, + { 10, 0, 0, 0, 1, 30, }, + { 11, 0, 0, 0, 1, 30, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 30, }, { 1, 0, 0, 0, 2, 34, }, { 3, 0, 0, 0, 2, 32, }, { 4, 0, 0, 0, 2, 34, }, { 5, 0, 0, 0, 2, 30, }, { 6, 0, 0, 0, 2, 32, }, { 7, 0, 0, 0, 2, 30, }, + { 8, 0, 0, 0, 2, 32, }, + { 9, 0, 0, 0, 2, 28, }, + { 10, 0, 0, 0, 2, 30, }, + { 11, 0, 0, 0, 2, 30, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 30, }, { 1, 0, 0, 0, 3, 34, }, { 3, 0, 0, 0, 3, 32, }, { 4, 0, 0, 0, 3, 34, }, { 5, 0, 0, 0, 3, 30, }, { 6, 0, 0, 0, 3, 32, }, { 7, 0, 0, 0, 3, 30, }, + { 8, 0, 0, 0, 3, 32, }, + { 9, 0, 0, 0, 3, 28, }, + { 10, 0, 0, 0, 3, 30, }, + { 11, 0, 0, 0, 3, 30, }, + { 0, 0, 0, 0, 4, 32, }, + { 2, 0, 0, 0, 4, 30, }, { 1, 0, 0, 0, 4, 34, }, { 3, 0, 0, 0, 4, 32, }, { 4, 0, 0, 0, 4, 34, }, { 5, 0, 0, 0, 4, 30, }, { 6, 0, 0, 0, 4, 32, }, { 7, 0, 0, 0, 4, 30, }, + { 8, 0, 0, 0, 4, 32, }, + { 9, 0, 0, 0, 4, 28, }, + { 10, 0, 0, 0, 4, 30, }, + { 11, 0, 0, 0, 4, 30, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 30, }, { 1, 0, 0, 0, 5, 34, }, { 3, 0, 0, 0, 5, 32, }, { 4, 0, 0, 0, 5, 34, }, { 5, 0, 0, 0, 5, 30, }, { 6, 0, 0, 0, 5, 32, }, { 7, 0, 0, 0, 5, 30, }, + { 8, 0, 0, 0, 5, 32, }, + { 9, 0, 0, 0, 5, 28, }, + { 10, 0, 0, 0, 5, 30, }, + { 11, 0, 0, 0, 5, 30, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 30, }, { 1, 0, 0, 0, 6, 34, }, { 3, 0, 0, 0, 6, 32, }, { 4, 0, 0, 0, 6, 34, }, { 5, 0, 0, 0, 6, 30, }, { 6, 0, 0, 0, 6, 32, }, { 7, 0, 0, 0, 6, 30, }, + { 8, 0, 0, 0, 6, 32, }, + { 9, 0, 0, 0, 6, 28, }, + { 10, 0, 0, 0, 6, 30, }, + { 11, 0, 0, 0, 6, 30, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 30, }, { 1, 0, 0, 0, 7, 34, }, { 3, 0, 0, 0, 7, 32, }, { 4, 0, 0, 0, 7, 34, }, { 5, 0, 0, 0, 7, 30, }, { 6, 0, 0, 0, 7, 32, }, { 7, 0, 0, 0, 7, 30, }, + { 8, 0, 0, 0, 7, 32, }, + { 9, 0, 0, 0, 7, 28, }, + { 10, 0, 0, 0, 7, 30, }, + { 11, 0, 0, 0, 7, 30, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 0, 0, 0, 8, 30, }, { 1, 0, 0, 0, 8, 34, }, { 3, 0, 0, 0, 8, 32, }, { 4, 0, 0, 0, 8, 34, }, { 5, 0, 0, 0, 8, 30, }, { 6, 0, 0, 0, 8, 32, }, { 7, 0, 0, 0, 8, 30, }, + { 8, 0, 0, 0, 8, 32, }, + { 9, 0, 0, 0, 8, 28, }, + { 10, 0, 0, 0, 8, 30, }, + { 11, 0, 0, 0, 8, 30, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 30, }, { 1, 0, 0, 0, 9, 34, }, { 3, 0, 0, 0, 9, 32, }, { 4, 0, 0, 0, 9, 34, }, { 5, 0, 0, 0, 9, 30, }, { 6, 0, 0, 0, 9, 32, }, { 7, 0, 0, 0, 9, 30, }, + { 8, 0, 0, 0, 9, 32, }, + { 9, 0, 0, 0, 9, 28, }, + { 10, 0, 0, 0, 9, 30, }, + { 11, 0, 0, 0, 9, 30, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 30, }, { 1, 0, 0, 0, 10, 34, }, { 3, 0, 0, 0, 10, 32, }, { 4, 0, 0, 0, 10, 34, }, { 5, 0, 0, 0, 10, 30, }, { 6, 0, 0, 0, 10, 32, }, { 7, 0, 0, 0, 10, 30, }, + { 8, 0, 0, 0, 10, 
32, }, + { 9, 0, 0, 0, 10, 28, }, + { 10, 0, 0, 0, 10, 30, }, + { 11, 0, 0, 0, 10, 30, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 30, }, { 1, 0, 0, 0, 11, 34, }, { 3, 0, 0, 0, 11, 32, }, { 4, 0, 0, 0, 11, 34, }, { 5, 0, 0, 0, 11, 30, }, { 6, 0, 0, 0, 11, 32, }, { 7, 0, 0, 0, 11, 30, }, + { 8, 0, 0, 0, 11, 32, }, + { 9, 0, 0, 0, 11, 28, }, + { 10, 0, 0, 0, 11, 30, }, + { 11, 0, 0, 0, 11, 30, }, + { 0, 0, 0, 0, 12, 24, }, + { 2, 0, 0, 0, 12, 30, }, { 1, 0, 0, 0, 12, 34, }, { 3, 0, 0, 0, 12, 24, }, { 4, 0, 0, 0, 12, 34, }, { 5, 0, 0, 0, 12, 30, }, { 6, 0, 0, 0, 12, 24, }, { 7, 0, 0, 0, 12, 30, }, + { 8, 0, 0, 0, 12, 24, }, + { 9, 0, 0, 0, 12, 24, }, + { 10, 0, 0, 0, 12, 30, }, + { 11, 0, 0, 0, 12, 30, }, + { 0, 0, 0, 0, 13, 16, }, + { 2, 0, 0, 0, 13, 30, }, { 1, 0, 0, 0, 13, 34, }, { 3, 0, 0, 0, 13, 16, }, { 4, 0, 0, 0, 13, 34, }, { 5, 0, 0, 0, 13, 30, }, { 6, 0, 0, 0, 13, 16, }, { 7, 0, 0, 0, 13, 30, }, + { 8, 0, 0, 0, 13, 16, }, + { 9, 0, 0, 0, 13, 18, }, + { 10, 0, 0, 0, 13, 30, }, + { 11, 0, 0, 0, 13, 30, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, { 1, 0, 0, 0, 14, 34, }, { 3, 0, 0, 0, 14, 63, }, { 4, 0, 0, 0, 14, 63, }, { 5, 0, 0, 0, 14, 63, }, { 6, 0, 0, 0, 14, 63, }, { 7, 0, 0, 0, 14, 63, }, + { 8, 0, 0, 0, 14, 63, }, + { 9, 0, 0, 0, 14, 63, }, + { 10, 0, 0, 0, 14, 63, }, + { 11, 0, 0, 0, 14, 63, }, + { 0, 0, 0, 1, 1, 30, }, + { 2, 0, 0, 1, 1, 30, }, { 1, 0, 0, 1, 1, 34, }, { 3, 0, 0, 1, 1, 30, }, { 4, 0, 0, 1, 1, 32, }, { 5, 0, 0, 1, 1, 30, }, { 6, 0, 0, 1, 1, 30, }, { 7, 0, 0, 1, 1, 30, }, + { 8, 0, 0, 1, 1, 30, }, + { 9, 0, 0, 1, 1, 30, }, + { 10, 0, 0, 1, 1, 30, }, + { 11, 0, 0, 1, 1, 30, }, + { 0, 0, 0, 1, 2, 32, }, + { 2, 0, 0, 1, 2, 30, }, { 1, 0, 0, 1, 2, 34, }, { 3, 0, 0, 1, 2, 32, }, { 4, 0, 0, 1, 2, 34, }, { 5, 0, 0, 1, 2, 30, }, { 6, 0, 0, 1, 2, 32, }, { 7, 0, 0, 1, 2, 30, }, + { 8, 0, 0, 1, 2, 32, }, + { 9, 0, 0, 1, 2, 30, }, + { 10, 0, 0, 1, 2, 30, }, + { 11, 0, 0, 1, 2, 30, }, + { 0, 0, 0, 1, 3, 34, }, + { 2, 0, 0, 1, 3, 30, }, { 1, 0, 0, 1, 3, 34, }, { 3, 0, 0, 1, 3, 34, }, { 4, 0, 0, 1, 3, 34, }, { 5, 0, 0, 1, 3, 30, }, { 6, 0, 0, 1, 3, 34, }, { 7, 0, 0, 1, 3, 30, }, + { 8, 0, 0, 1, 3, 34, }, + { 9, 0, 0, 1, 3, 30, }, + { 10, 0, 0, 1, 3, 30, }, + { 11, 0, 0, 1, 3, 30, }, + { 0, 0, 0, 1, 4, 34, }, + { 2, 0, 0, 1, 4, 30, }, { 1, 0, 0, 1, 4, 34, }, { 3, 0, 0, 1, 4, 34, }, { 4, 0, 0, 1, 4, 34, }, { 5, 0, 0, 1, 4, 30, }, { 6, 0, 0, 1, 4, 34, }, { 7, 0, 0, 1, 4, 30, }, + { 8, 0, 0, 1, 4, 34, }, + { 9, 0, 0, 1, 4, 30, }, + { 10, 0, 0, 1, 4, 30, }, + { 11, 0, 0, 1, 4, 30, }, + { 0, 0, 0, 1, 5, 34, }, + { 2, 0, 0, 1, 5, 30, }, { 1, 0, 0, 1, 5, 34, }, { 3, 0, 0, 1, 5, 34, }, { 4, 0, 0, 1, 5, 34, }, { 5, 0, 0, 1, 5, 30, }, { 6, 0, 0, 1, 5, 34, }, { 7, 0, 0, 1, 5, 30, }, + { 8, 0, 0, 1, 5, 34, }, + { 9, 0, 0, 1, 5, 30, }, + { 10, 0, 0, 1, 5, 30, }, + { 11, 0, 0, 1, 5, 30, }, + { 0, 0, 0, 1, 6, 34, }, + { 2, 0, 0, 1, 6, 30, }, { 1, 0, 0, 1, 6, 34, }, { 3, 0, 0, 1, 6, 34, }, { 4, 0, 0, 1, 6, 34, }, { 5, 0, 0, 1, 6, 30, }, { 6, 0, 0, 1, 6, 34, }, { 7, 0, 0, 1, 6, 30, }, + { 8, 0, 0, 1, 6, 34, }, + { 9, 0, 0, 1, 6, 30, }, + { 10, 0, 0, 1, 6, 30, }, + { 11, 0, 0, 1, 6, 30, }, + { 0, 0, 0, 1, 7, 34, }, + { 2, 0, 0, 1, 7, 30, }, { 1, 0, 0, 1, 7, 34, }, { 3, 0, 0, 1, 7, 34, }, { 4, 0, 0, 1, 7, 34, }, { 5, 0, 0, 1, 7, 30, }, { 6, 0, 0, 1, 7, 34, }, { 7, 0, 0, 1, 7, 30, }, + { 8, 0, 0, 1, 7, 34, }, + { 9, 0, 0, 1, 7, 30, }, + { 10, 0, 0, 1, 7, 30, }, + { 11, 0, 0, 1, 7, 30, }, + { 0, 0, 0, 1, 8, 34, }, + { 2, 0, 0, 1, 8, 30, }, { 1, 0, 0, 1, 8, 34, }, { 3, 0, 0, 1, 8, 
34, }, { 4, 0, 0, 1, 8, 34, }, { 5, 0, 0, 1, 8, 30, }, { 6, 0, 0, 1, 8, 34, }, { 7, 0, 0, 1, 8, 30, }, + { 8, 0, 0, 1, 8, 34, }, + { 9, 0, 0, 1, 8, 30, }, + { 10, 0, 0, 1, 8, 30, }, + { 11, 0, 0, 1, 8, 30, }, + { 0, 0, 0, 1, 9, 34, }, + { 2, 0, 0, 1, 9, 30, }, { 1, 0, 0, 1, 9, 34, }, { 3, 0, 0, 1, 9, 34, }, { 4, 0, 0, 1, 9, 34, }, { 5, 0, 0, 1, 9, 30, }, { 6, 0, 0, 1, 9, 34, }, { 7, 0, 0, 1, 9, 30, }, + { 8, 0, 0, 1, 9, 34, }, + { 9, 0, 0, 1, 9, 30, }, + { 10, 0, 0, 1, 9, 30, }, + { 11, 0, 0, 1, 9, 30, }, + { 0, 0, 0, 1, 10, 32, }, + { 2, 0, 0, 1, 10, 30, }, { 1, 0, 0, 1, 10, 34, }, { 3, 0, 0, 1, 10, 32, }, { 4, 0, 0, 1, 10, 34, }, { 5, 0, 0, 1, 10, 30, }, { 6, 0, 0, 1, 10, 32, }, { 7, 0, 0, 1, 10, 30, }, + { 8, 0, 0, 1, 10, 32, }, + { 9, 0, 0, 1, 10, 26, }, + { 10, 0, 0, 1, 10, 30, }, + { 11, 0, 0, 1, 10, 30, }, + { 0, 0, 0, 1, 11, 30, }, + { 2, 0, 0, 1, 11, 30, }, { 1, 0, 0, 1, 11, 34, }, { 3, 0, 0, 1, 11, 30, }, { 4, 0, 0, 1, 11, 34, }, { 5, 0, 0, 1, 11, 30, }, { 6, 0, 0, 1, 11, 30, }, { 7, 0, 0, 1, 11, 30, }, + { 8, 0, 0, 1, 11, 30, }, + { 9, 0, 0, 1, 11, 22, }, + { 10, 0, 0, 1, 11, 30, }, + { 11, 0, 0, 1, 11, 30, }, + { 0, 0, 0, 1, 12, 28, }, + { 2, 0, 0, 1, 12, 30, }, { 1, 0, 0, 1, 12, 34, }, { 3, 0, 0, 1, 12, 28, }, { 4, 0, 0, 1, 12, 34, }, { 5, 0, 0, 1, 12, 30, }, { 6, 0, 0, 1, 12, 28, }, { 7, 0, 0, 1, 12, 30, }, + { 8, 0, 0, 1, 12, 28, }, + { 9, 0, 0, 1, 12, 18, }, + { 10, 0, 0, 1, 12, 30, }, + { 11, 0, 0, 1, 12, 30, }, + { 0, 0, 0, 1, 13, 16, }, + { 2, 0, 0, 1, 13, 30, }, { 1, 0, 0, 1, 13, 34, }, { 3, 0, 0, 1, 13, 16, }, { 4, 0, 0, 1, 13, 32, }, { 5, 0, 0, 1, 13, 30, }, { 6, 0, 0, 1, 13, 16, }, { 7, 0, 0, 1, 13, 30, }, + { 8, 0, 0, 1, 13, 16, }, + { 9, 0, 0, 1, 13, 2, }, + { 10, 0, 0, 1, 13, 30, }, + { 11, 0, 0, 1, 13, 30, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, { 1, 0, 0, 1, 14, 63, }, { 3, 0, 0, 1, 14, 63, }, { 4, 0, 0, 1, 14, 63, }, { 5, 0, 0, 1, 14, 63, }, { 6, 0, 0, 1, 14, 63, }, { 7, 0, 0, 1, 14, 63, }, + { 8, 0, 0, 1, 14, 63, }, + { 9, 0, 0, 1, 14, 63, }, + { 10, 0, 0, 1, 14, 63, }, + { 11, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 30, }, { 1, 0, 0, 2, 1, 34, }, { 3, 0, 0, 2, 1, 26, }, { 4, 0, 0, 2, 1, 32, }, { 5, 0, 0, 2, 1, 30, }, { 6, 0, 0, 2, 1, 26, }, { 7, 0, 0, 2, 1, 30, }, + { 8, 0, 0, 2, 1, 26, }, + { 9, 0, 0, 2, 1, 30, }, + { 10, 0, 0, 2, 1, 30, }, + { 11, 0, 0, 2, 1, 30, }, + { 0, 0, 0, 2, 2, 30, }, + { 2, 0, 0, 2, 2, 30, }, { 1, 0, 0, 2, 2, 34, }, { 3, 0, 0, 2, 2, 30, }, { 4, 0, 0, 2, 2, 34, }, { 5, 0, 0, 2, 2, 30, }, { 6, 0, 0, 2, 2, 30, }, { 7, 0, 0, 2, 2, 30, }, + { 8, 0, 0, 2, 2, 30, }, + { 9, 0, 0, 2, 2, 30, }, + { 10, 0, 0, 2, 2, 30, }, + { 11, 0, 0, 2, 2, 30, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 30, }, { 1, 0, 0, 2, 3, 34, }, { 3, 0, 0, 2, 3, 32, }, { 4, 0, 0, 2, 3, 34, }, { 5, 0, 0, 2, 3, 30, }, { 6, 0, 0, 2, 3, 32, }, { 7, 0, 0, 2, 3, 30, }, + { 8, 0, 0, 2, 3, 32, }, + { 9, 0, 0, 2, 3, 30, }, + { 10, 0, 0, 2, 3, 30, }, + { 11, 0, 0, 2, 3, 30, }, + { 0, 0, 0, 2, 4, 34, }, + { 2, 0, 0, 2, 4, 30, }, { 1, 0, 0, 2, 4, 34, }, { 3, 0, 0, 2, 4, 34, }, { 4, 0, 0, 2, 4, 34, }, { 5, 0, 0, 2, 4, 30, }, { 6, 0, 0, 2, 4, 34, }, { 7, 0, 0, 2, 4, 30, }, + { 8, 0, 0, 2, 4, 34, }, + { 9, 0, 0, 2, 4, 30, }, + { 10, 0, 0, 2, 4, 30, }, + { 11, 0, 0, 2, 4, 30, }, + { 0, 0, 0, 2, 5, 34, }, + { 2, 0, 0, 2, 5, 30, }, { 1, 0, 0, 2, 5, 34, }, { 3, 0, 0, 2, 5, 34, }, { 4, 0, 0, 2, 5, 34, }, { 5, 0, 0, 2, 5, 30, }, { 6, 0, 0, 2, 5, 34, }, { 7, 0, 0, 2, 5, 30, }, + { 8, 0, 0, 2, 5, 34, }, + { 9, 0, 0, 2, 5, 30, }, + { 10, 0, 0, 2, 
5, 30, }, + { 11, 0, 0, 2, 5, 30, }, + { 0, 0, 0, 2, 6, 34, }, + { 2, 0, 0, 2, 6, 30, }, { 1, 0, 0, 2, 6, 34, }, { 3, 0, 0, 2, 6, 34, }, { 4, 0, 0, 2, 6, 34, }, { 5, 0, 0, 2, 6, 30, }, { 6, 0, 0, 2, 6, 34, }, { 7, 0, 0, 2, 6, 30, }, + { 8, 0, 0, 2, 6, 34, }, + { 9, 0, 0, 2, 6, 30, }, + { 10, 0, 0, 2, 6, 30, }, + { 11, 0, 0, 2, 6, 30, }, + { 0, 0, 0, 2, 7, 34, }, + { 2, 0, 0, 2, 7, 30, }, { 1, 0, 0, 2, 7, 34, }, { 3, 0, 0, 2, 7, 34, }, { 4, 0, 0, 2, 7, 34, }, { 5, 0, 0, 2, 7, 30, }, { 6, 0, 0, 2, 7, 34, }, { 7, 0, 0, 2, 7, 30, }, + { 8, 0, 0, 2, 7, 34, }, + { 9, 0, 0, 2, 7, 30, }, + { 10, 0, 0, 2, 7, 30, }, + { 11, 0, 0, 2, 7, 30, }, + { 0, 0, 0, 2, 8, 34, }, + { 2, 0, 0, 2, 8, 30, }, { 1, 0, 0, 2, 8, 34, }, { 3, 0, 0, 2, 8, 34, }, { 4, 0, 0, 2, 8, 34, }, { 5, 0, 0, 2, 8, 30, }, { 6, 0, 0, 2, 8, 34, }, { 7, 0, 0, 2, 8, 30, }, + { 8, 0, 0, 2, 8, 34, }, + { 9, 0, 0, 2, 8, 30, }, + { 10, 0, 0, 2, 8, 30, }, + { 11, 0, 0, 2, 8, 30, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 30, }, { 1, 0, 0, 2, 9, 34, }, { 3, 0, 0, 2, 9, 32, }, { 4, 0, 0, 2, 9, 34, }, { 5, 0, 0, 2, 9, 30, }, { 6, 0, 0, 2, 9, 32, }, { 7, 0, 0, 2, 9, 30, }, + { 8, 0, 0, 2, 9, 32, }, + { 9, 0, 0, 2, 9, 30, }, + { 10, 0, 0, 2, 9, 30, }, + { 11, 0, 0, 2, 9, 30, }, + { 0, 0, 0, 2, 10, 30, }, + { 2, 0, 0, 2, 10, 30, }, { 1, 0, 0, 2, 10, 34, }, { 3, 0, 0, 2, 10, 30, }, { 4, 0, 0, 2, 10, 34, }, { 5, 0, 0, 2, 10, 30, }, { 6, 0, 0, 2, 10, 30, }, { 7, 0, 0, 2, 10, 30, }, + { 8, 0, 0, 2, 10, 30, }, + { 9, 0, 0, 2, 10, 24, }, + { 10, 0, 0, 2, 10, 30, }, + { 11, 0, 0, 2, 10, 30, }, + { 0, 0, 0, 2, 11, 28, }, + { 2, 0, 0, 2, 11, 30, }, { 1, 0, 0, 2, 11, 34, }, { 3, 0, 0, 2, 11, 28, }, { 4, 0, 0, 2, 11, 34, }, { 5, 0, 0, 2, 11, 30, }, { 6, 0, 0, 2, 11, 28, }, { 7, 0, 0, 2, 11, 30, }, + { 8, 0, 0, 2, 11, 28, }, + { 9, 0, 0, 2, 11, 20, }, + { 10, 0, 0, 2, 11, 30, }, + { 11, 0, 0, 2, 11, 30, }, + { 0, 0, 0, 2, 12, 26, }, + { 2, 0, 0, 2, 12, 30, }, { 1, 0, 0, 2, 12, 34, }, { 3, 0, 0, 2, 12, 26, }, { 4, 0, 0, 2, 12, 34, }, { 5, 0, 0, 2, 12, 30, }, { 6, 0, 0, 2, 12, 26, }, { 7, 0, 0, 2, 12, 30, }, + { 8, 0, 0, 2, 12, 26, }, + { 9, 0, 0, 2, 12, 16, }, + { 10, 0, 0, 2, 12, 30, }, + { 11, 0, 0, 2, 12, 30, }, + { 0, 0, 0, 2, 13, 12, }, + { 2, 0, 0, 2, 13, 30, }, { 1, 0, 0, 2, 13, 34, }, { 3, 0, 0, 2, 13, 12, }, { 4, 0, 0, 2, 13, 32, }, { 5, 0, 0, 2, 13, 30, }, { 6, 0, 0, 2, 13, 12, }, { 7, 0, 0, 2, 13, 30, }, + { 8, 0, 0, 2, 13, 12, }, + { 9, 0, 0, 2, 13, 0, }, + { 10, 0, 0, 2, 13, 30, }, + { 11, 0, 0, 2, 13, 30, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, { 1, 0, 0, 2, 14, 63, }, { 3, 0, 0, 2, 14, 63, }, { 4, 0, 0, 2, 14, 63, }, { 5, 0, 0, 2, 14, 63, }, { 6, 0, 0, 2, 14, 63, }, { 7, 0, 0, 2, 14, 63, }, + { 8, 0, 0, 2, 14, 63, }, + { 9, 0, 0, 2, 14, 63, }, + { 10, 0, 0, 2, 14, 63, }, + { 11, 0, 0, 2, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, { 1, 0, 1, 2, 1, 63, }, { 3, 0, 1, 2, 1, 63, }, { 4, 0, 1, 2, 1, 63, }, { 5, 0, 1, 2, 1, 63, }, { 6, 0, 1, 2, 1, 63, }, { 7, 0, 1, 2, 1, 63, }, + { 8, 0, 1, 2, 1, 63, }, + { 9, 0, 1, 2, 1, 63, }, + { 10, 0, 1, 2, 1, 63, }, + { 11, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, { 1, 0, 1, 2, 2, 63, }, { 3, 0, 1, 2, 2, 63, }, { 4, 0, 1, 2, 2, 63, }, { 5, 0, 1, 2, 2, 63, }, { 6, 0, 1, 2, 2, 63, }, { 7, 0, 1, 2, 2, 63, }, + { 8, 0, 1, 2, 2, 63, }, + { 9, 0, 1, 2, 2, 63, }, + { 10, 0, 1, 2, 2, 63, }, + { 11, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 30, }, { 1, 0, 1, 2, 3, 30, }, { 3, 0, 1, 2, 3, 26, }, { 4, 0, 1, 2, 3, 30, }, { 5, 0, 1, 
2, 3, 30, }, { 6, 0, 1, 2, 3, 26, }, { 7, 0, 1, 2, 3, 30, }, + { 8, 0, 1, 2, 3, 26, }, + { 9, 0, 1, 2, 3, 29, }, + { 10, 0, 1, 2, 3, 30, }, + { 11, 0, 1, 2, 3, 30, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 30, }, { 1, 0, 1, 2, 4, 30, }, { 3, 0, 1, 2, 4, 26, }, { 4, 0, 1, 2, 4, 30, }, { 5, 0, 1, 2, 4, 30, }, { 6, 0, 1, 2, 4, 26, }, { 7, 0, 1, 2, 4, 30, }, + { 8, 0, 1, 2, 4, 26, }, + { 9, 0, 1, 2, 4, 29, }, + { 10, 0, 1, 2, 4, 30, }, + { 11, 0, 1, 2, 4, 30, }, + { 0, 0, 1, 2, 5, 30, }, + { 2, 0, 1, 2, 5, 30, }, { 1, 0, 1, 2, 5, 30, }, { 3, 0, 1, 2, 5, 30, }, { 4, 0, 1, 2, 5, 30, }, { 5, 0, 1, 2, 5, 30, }, { 6, 0, 1, 2, 5, 30, }, { 7, 0, 1, 2, 5, 30, }, + { 8, 0, 1, 2, 5, 30, }, + { 9, 0, 1, 2, 5, 29, }, + { 10, 0, 1, 2, 5, 30, }, + { 11, 0, 1, 2, 5, 30, }, + { 0, 0, 1, 2, 6, 30, }, + { 2, 0, 1, 2, 6, 30, }, { 1, 0, 1, 2, 6, 30, }, { 3, 0, 1, 2, 6, 30, }, { 4, 0, 1, 2, 6, 30, }, { 5, 0, 1, 2, 6, 30, }, { 6, 0, 1, 2, 6, 30, }, { 7, 0, 1, 2, 6, 30, }, + { 8, 0, 1, 2, 6, 30, }, + { 9, 0, 1, 2, 6, 29, }, + { 10, 0, 1, 2, 6, 30, }, + { 11, 0, 1, 2, 6, 30, }, + { 0, 0, 1, 2, 7, 30, }, + { 2, 0, 1, 2, 7, 30, }, { 1, 0, 1, 2, 7, 30, }, { 3, 0, 1, 2, 7, 30, }, { 4, 0, 1, 2, 7, 30, }, { 5, 0, 1, 2, 7, 30, }, { 6, 0, 1, 2, 7, 30, }, { 7, 0, 1, 2, 7, 30, }, + { 8, 0, 1, 2, 7, 30, }, + { 9, 0, 1, 2, 7, 29, }, + { 10, 0, 1, 2, 7, 30, }, + { 11, 0, 1, 2, 7, 30, }, + { 0, 0, 1, 2, 8, 26, }, + { 2, 0, 1, 2, 8, 30, }, { 1, 0, 1, 2, 8, 30, }, { 3, 0, 1, 2, 8, 26, }, { 4, 0, 1, 2, 8, 30, }, { 5, 0, 1, 2, 8, 30, }, { 6, 0, 1, 2, 8, 26, }, { 7, 0, 1, 2, 8, 30, }, + { 8, 0, 1, 2, 8, 26, }, + { 9, 0, 1, 2, 8, 25, }, + { 10, 0, 1, 2, 8, 30, }, + { 11, 0, 1, 2, 8, 30, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 30, }, { 1, 0, 1, 2, 9, 30, }, { 3, 0, 1, 2, 9, 26, }, { 4, 0, 1, 2, 9, 30, }, { 5, 0, 1, 2, 9, 30, }, { 6, 0, 1, 2, 9, 26, }, { 7, 0, 1, 2, 9, 30, }, + { 8, 0, 1, 2, 9, 26, }, + { 9, 0, 1, 2, 9, 21, }, + { 10, 0, 1, 2, 9, 30, }, + { 11, 0, 1, 2, 9, 30, }, + { 0, 0, 1, 2, 10, 28, }, + { 2, 0, 1, 2, 10, 30, }, { 1, 0, 1, 2, 10, 30, }, { 3, 0, 1, 2, 10, 28, }, { 4, 0, 1, 2, 10, 30, }, { 5, 0, 1, 2, 10, 30, }, { 6, 0, 1, 2, 10, 28, }, { 7, 0, 1, 2, 10, 30, }, + { 8, 0, 1, 2, 10, 28, }, + { 9, 0, 1, 2, 10, 17, }, + { 10, 0, 1, 2, 10, 30, }, + { 11, 0, 1, 2, 10, 30, }, + { 0, 0, 1, 2, 11, 20, }, + { 2, 0, 1, 2, 11, 30, }, { 1, 0, 1, 2, 11, 30, }, { 3, 0, 1, 2, 11, 20, }, { 4, 0, 1, 2, 11, 30, }, { 5, 0, 1, 2, 11, 30, }, { 6, 0, 1, 2, 11, 20, }, { 7, 0, 1, 2, 11, 30, }, + { 8, 0, 1, 2, 11, 20, }, + { 9, 0, 1, 2, 11, 5, }, + { 10, 0, 1, 2, 11, 30, }, + { 11, 0, 1, 2, 11, 30, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, { 1, 0, 1, 2, 12, 63, }, { 3, 0, 1, 2, 12, 63, }, { 4, 0, 1, 2, 12, 63, }, { 5, 0, 1, 2, 12, 63, }, { 6, 0, 1, 2, 12, 63, }, { 7, 0, 1, 2, 12, 63, }, + { 8, 0, 1, 2, 12, 63, }, + { 9, 0, 1, 2, 12, 63, }, + { 10, 0, 1, 2, 12, 63, }, + { 11, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, { 1, 0, 1, 2, 13, 63, }, { 3, 0, 1, 2, 13, 63, }, { 4, 0, 1, 2, 13, 63, }, { 5, 0, 1, 2, 13, 63, }, { 6, 0, 1, 2, 13, 63, }, { 7, 0, 1, 2, 13, 63, }, + { 8, 0, 1, 2, 13, 63, }, + { 9, 0, 1, 2, 13, 63, }, + { 10, 0, 1, 2, 13, 63, }, + { 11, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, { 1, 0, 1, 2, 14, 63, }, { 3, 0, 1, 2, 14, 63, }, { 4, 0, 1, 2, 14, 63, }, { 5, 0, 1, 2, 14, 63, }, { 6, 0, 1, 2, 14, 63, }, { 7, 0, 1, 2, 14, 63, }, + { 8, 0, 1, 2, 14, 63, }, + { 9, 0, 1, 2, 14, 63, }, + { 10, 0, 1, 2, 14, 63, }, + { 11, 0, 1, 2, 14, 63, }, + { 0, 
1, 0, 1, 36, 31, }, + { 2, 1, 0, 1, 36, 32, }, { 1, 1, 0, 1, 36, 33, }, { 3, 1, 0, 1, 36, 31, }, { 4, 1, 0, 1, 36, 29, }, { 5, 1, 0, 1, 36, 32, }, - { 6, 1, 0, 1, 36, 29, }, + { 6, 1, 0, 1, 36, 31, }, { 7, 1, 0, 1, 36, 27, }, + { 8, 1, 0, 1, 36, 31, }, + { 9, 1, 0, 1, 36, 29, }, + { 10, 1, 0, 1, 36, 63, }, + { 11, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 33, }, + { 2, 1, 0, 1, 40, 32, }, { 1, 1, 0, 1, 40, 33, }, { 3, 1, 0, 1, 40, 31, }, { 4, 1, 0, 1, 40, 28, }, { 5, 1, 0, 1, 40, 32, }, - { 6, 1, 0, 1, 40, 29, }, + { 6, 1, 0, 1, 40, 33, }, { 7, 1, 0, 1, 40, 27, }, + { 8, 1, 0, 1, 40, 31, }, + { 9, 1, 0, 1, 40, 29, }, + { 10, 1, 0, 1, 40, 63, }, + { 11, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 33, }, + { 2, 1, 0, 1, 44, 32, }, { 1, 1, 0, 1, 44, 33, }, { 3, 1, 0, 1, 44, 31, }, { 4, 1, 0, 1, 44, 28, }, { 5, 1, 0, 1, 44, 32, }, - { 6, 1, 0, 1, 44, 30, }, + { 6, 1, 0, 1, 44, 33, }, { 7, 1, 0, 1, 44, 27, }, + { 8, 1, 0, 1, 44, 31, }, + { 9, 1, 0, 1, 44, 29, }, + { 10, 1, 0, 1, 44, 63, }, + { 11, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 31, }, + { 2, 1, 0, 1, 48, 32, }, { 1, 1, 0, 1, 48, 33, }, { 3, 1, 0, 1, 48, 31, }, { 4, 1, 0, 1, 48, 27, }, { 5, 1, 0, 1, 48, 32, }, - { 6, 1, 0, 1, 48, 30, }, + { 6, 1, 0, 1, 48, 31, }, { 7, 1, 0, 1, 48, 27, }, + { 8, 1, 0, 1, 48, 31, }, + { 9, 1, 0, 1, 48, 29, }, + { 10, 1, 0, 1, 48, 63, }, + { 11, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 33, }, + { 2, 1, 0, 1, 52, 32, }, { 1, 1, 0, 1, 52, 33, }, { 3, 1, 0, 1, 52, 32, }, { 4, 1, 0, 1, 52, 16, }, { 5, 1, 0, 1, 52, 32, }, - { 6, 1, 0, 1, 52, 30, }, + { 6, 1, 0, 1, 52, 33, }, { 7, 1, 0, 1, 52, 27, }, + { 8, 1, 0, 1, 52, 33, }, + { 9, 1, 0, 1, 52, 29, }, + { 10, 1, 0, 1, 52, 63, }, + { 11, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 33, }, + { 2, 1, 0, 1, 56, 32, }, { 1, 1, 0, 1, 56, 33, }, { 3, 1, 0, 1, 56, 32, }, { 4, 1, 0, 1, 56, 33, }, { 5, 1, 0, 1, 56, 32, }, - { 6, 1, 0, 1, 56, 30, }, + { 6, 1, 0, 1, 56, 33, }, { 7, 1, 0, 1, 56, 27, }, + { 8, 1, 0, 1, 56, 33, }, + { 9, 1, 0, 1, 56, 29, }, + { 10, 1, 0, 1, 56, 63, }, + { 11, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 33, }, + { 2, 1, 0, 1, 60, 32, }, { 1, 1, 0, 1, 60, 33, }, { 3, 1, 0, 1, 60, 32, }, { 4, 1, 0, 1, 60, 33, }, { 5, 1, 0, 1, 60, 32, }, - { 6, 1, 0, 1, 60, 30, }, + { 6, 1, 0, 1, 60, 33, }, { 7, 1, 0, 1, 60, 27, }, + { 8, 1, 0, 1, 60, 33, }, + { 9, 1, 0, 1, 60, 29, }, + { 10, 1, 0, 1, 60, 63, }, + { 11, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 30, }, + { 2, 1, 0, 1, 64, 32, }, { 1, 1, 0, 1, 64, 33, }, { 3, 1, 0, 1, 64, 30, }, { 4, 1, 0, 1, 64, 33, }, { 5, 1, 0, 1, 64, 32, }, - { 6, 1, 0, 1, 64, 29, }, + { 6, 1, 0, 1, 64, 30, }, { 7, 1, 0, 1, 64, 27, }, + { 8, 1, 0, 1, 64, 30, }, + { 9, 1, 0, 1, 64, 29, }, + { 10, 1, 0, 1, 64, 63, }, + { 11, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 30, }, + { 2, 1, 0, 1, 100, 32, }, { 1, 1, 0, 1, 100, 33, }, { 3, 1, 0, 1, 100, 30, }, { 4, 1, 0, 1, 100, 33, }, { 5, 1, 0, 1, 100, 32, }, - { 6, 1, 0, 1, 100, 30, }, + { 6, 1, 0, 1, 100, 33, }, { 7, 1, 0, 1, 100, 27, }, + { 8, 1, 0, 1, 100, 30, }, + { 9, 1, 0, 1, 100, 63, }, + { 10, 1, 0, 1, 100, 63, }, + { 11, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 33, }, + { 2, 1, 0, 1, 104, 32, }, { 1, 1, 0, 1, 104, 33, }, { 3, 1, 0, 1, 104, 33, }, { 4, 1, 0, 1, 104, 33, }, { 5, 1, 0, 1, 104, 32, }, - { 6, 1, 0, 1, 104, 30, }, + { 6, 1, 0, 1, 104, 33, }, { 7, 1, 0, 1, 104, 27, }, + { 8, 1, 0, 1, 104, 33, }, + { 9, 1, 0, 1, 104, 63, }, + { 10, 1, 0, 1, 104, 63, }, + { 11, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 33, }, + { 2, 1, 0, 1, 108, 32, }, { 1, 1, 0, 1, 108, 33, }, { 3, 1, 0, 1, 108, 33, 
}, { 4, 1, 0, 1, 108, 33, }, { 5, 1, 0, 1, 108, 32, }, - { 6, 1, 0, 1, 108, 30, }, + { 6, 1, 0, 1, 108, 33, }, { 7, 1, 0, 1, 108, 27, }, + { 8, 1, 0, 1, 108, 33, }, + { 9, 1, 0, 1, 108, 63, }, + { 10, 1, 0, 1, 108, 63, }, + { 11, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 33, }, + { 2, 1, 0, 1, 112, 32, }, { 1, 1, 0, 1, 112, 33, }, { 3, 1, 0, 1, 112, 33, }, { 4, 1, 0, 1, 112, 33, }, { 5, 1, 0, 1, 112, 32, }, - { 6, 1, 0, 1, 112, 30, }, + { 6, 1, 0, 1, 112, 33, }, { 7, 1, 0, 1, 112, 27, }, + { 8, 1, 0, 1, 112, 33, }, + { 9, 1, 0, 1, 112, 63, }, + { 10, 1, 0, 1, 112, 63, }, + { 11, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 33, }, + { 2, 1, 0, 1, 116, 32, }, { 1, 1, 0, 1, 116, 33, }, { 3, 1, 0, 1, 116, 33, }, { 4, 1, 0, 1, 116, 33, }, { 5, 1, 0, 1, 116, 32, }, - { 6, 1, 0, 1, 116, 30, }, + { 6, 1, 0, 1, 116, 33, }, { 7, 1, 0, 1, 116, 27, }, + { 8, 1, 0, 1, 116, 33, }, + { 9, 1, 0, 1, 116, 63, }, + { 10, 1, 0, 1, 116, 63, }, + { 11, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 33, }, + { 2, 1, 0, 1, 120, 32, }, { 1, 1, 0, 1, 120, 33, }, { 3, 1, 0, 1, 120, 63, }, { 4, 1, 0, 1, 120, 33, }, { 5, 1, 0, 1, 120, 63, }, - { 6, 1, 0, 1, 120, 30, }, + { 6, 1, 0, 1, 120, 33, }, { 7, 1, 0, 1, 120, 27, }, + { 8, 1, 0, 1, 120, 33, }, + { 9, 1, 0, 1, 120, 63, }, + { 10, 1, 0, 1, 120, 63, }, + { 11, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 33, }, + { 2, 1, 0, 1, 124, 32, }, { 1, 1, 0, 1, 124, 33, }, { 3, 1, 0, 1, 124, 63, }, { 4, 1, 0, 1, 124, 33, }, { 5, 1, 0, 1, 124, 63, }, - { 6, 1, 0, 1, 124, 30, }, + { 6, 1, 0, 1, 124, 33, }, { 7, 1, 0, 1, 124, 27, }, + { 8, 1, 0, 1, 124, 33, }, + { 9, 1, 0, 1, 124, 63, }, + { 10, 1, 0, 1, 124, 63, }, + { 11, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 33, }, + { 2, 1, 0, 1, 128, 32, }, { 1, 1, 0, 1, 128, 33, }, { 3, 1, 0, 1, 128, 63, }, - { 4, 1, 0, 1, 128, 63, }, + { 4, 1, 0, 1, 128, 33, }, { 5, 1, 0, 1, 128, 63, }, - { 6, 1, 0, 1, 128, 30, }, + { 6, 1, 0, 1, 128, 33, }, { 7, 1, 0, 1, 128, 27, }, + { 8, 1, 0, 1, 128, 33, }, + { 9, 1, 0, 1, 128, 63, }, + { 10, 1, 0, 1, 128, 63, }, + { 11, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 33, }, + { 2, 1, 0, 1, 132, 32, }, { 1, 1, 0, 1, 132, 33, }, { 3, 1, 0, 1, 132, 33, }, - { 4, 1, 0, 1, 132, 63, }, + { 4, 1, 0, 1, 132, 33, }, { 5, 1, 0, 1, 132, 32, }, - { 6, 1, 0, 1, 132, 30, }, + { 6, 1, 0, 1, 132, 33, }, { 7, 1, 0, 1, 132, 27, }, + { 8, 1, 0, 1, 132, 33, }, + { 9, 1, 0, 1, 132, 63, }, + { 10, 1, 0, 1, 132, 63, }, + { 11, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 33, }, + { 2, 1, 0, 1, 136, 32, }, { 1, 1, 0, 1, 136, 33, }, { 3, 1, 0, 1, 136, 33, }, - { 4, 1, 0, 1, 136, 63, }, + { 4, 1, 0, 1, 136, 33, }, { 5, 1, 0, 1, 136, 32, }, - { 6, 1, 0, 1, 136, 30, }, - { 7, 1, 0, 1, 136, 63, }, + { 6, 1, 0, 1, 136, 33, }, + { 7, 1, 0, 1, 136, 27, }, + { 8, 1, 0, 1, 136, 33, }, + { 9, 1, 0, 1, 136, 63, }, + { 10, 1, 0, 1, 136, 63, }, + { 11, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 31, }, + { 2, 1, 0, 1, 140, 32, }, { 1, 1, 0, 1, 140, 33, }, { 3, 1, 0, 1, 140, 31, }, - { 4, 1, 0, 1, 140, 63, }, + { 4, 1, 0, 1, 140, 33, }, { 5, 1, 0, 1, 140, 32, }, - { 6, 1, 0, 1, 140, 30, }, - { 7, 1, 0, 1, 140, 63, }, + { 6, 1, 0, 1, 140, 33, }, + { 7, 1, 0, 1, 140, 27, }, + { 8, 1, 0, 1, 140, 31, }, + { 9, 1, 0, 1, 140, 63, }, + { 10, 1, 0, 1, 140, 63, }, + { 11, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 144, 30, }, + { 2, 1, 0, 1, 144, 63, }, { 1, 1, 0, 1, 144, 63, }, { 3, 1, 0, 1, 144, 30, }, - { 4, 1, 0, 1, 144, 63, }, + { 4, 1, 0, 1, 144, 33, }, { 5, 1, 0, 1, 144, 63, }, - { 6, 1, 0, 1, 144, 30, }, + { 6, 1, 0, 1, 144, 33, }, { 7, 1, 0, 1, 144, 63, }, + { 
8, 1, 0, 1, 144, 30, }, + { 9, 1, 0, 1, 144, 63, }, + { 10, 1, 0, 1, 144, 63, }, + { 11, 1, 0, 1, 144, 32, }, + { 0, 1, 0, 1, 149, 33, }, + { 2, 1, 0, 1, 149, 14, }, { 1, 1, 0, 1, 149, 63, }, { 3, 1, 0, 1, 149, 30, }, { 4, 1, 0, 1, 149, 33, }, { 5, 1, 0, 1, 149, 33, }, - { 6, 1, 0, 1, 149, 30, }, + { 6, 1, 0, 1, 149, 33, }, { 7, 1, 0, 1, 149, 27, }, + { 8, 1, 0, 1, 149, 33, }, + { 9, 1, 0, 1, 149, 30, }, + { 10, 1, 0, 1, 149, 14, }, + { 11, 1, 0, 1, 149, 31, }, + { 0, 1, 0, 1, 153, 33, }, + { 2, 1, 0, 1, 153, 14, }, { 1, 1, 0, 1, 153, 63, }, { 3, 1, 0, 1, 153, 33, }, { 4, 1, 0, 1, 153, 33, }, { 5, 1, 0, 1, 153, 33, }, - { 6, 1, 0, 1, 153, 30, }, + { 6, 1, 0, 1, 153, 33, }, { 7, 1, 0, 1, 153, 27, }, + { 8, 1, 0, 1, 153, 33, }, + { 9, 1, 0, 1, 153, 30, }, + { 10, 1, 0, 1, 153, 14, }, + { 11, 1, 0, 1, 153, 31, }, + { 0, 1, 0, 1, 157, 33, }, + { 2, 1, 0, 1, 157, 14, }, { 1, 1, 0, 1, 157, 63, }, { 3, 1, 0, 1, 157, 33, }, { 4, 1, 0, 1, 157, 33, }, { 5, 1, 0, 1, 157, 33, }, - { 6, 1, 0, 1, 157, 30, }, + { 6, 1, 0, 1, 157, 33, }, { 7, 1, 0, 1, 157, 27, }, + { 8, 1, 0, 1, 157, 33, }, + { 9, 1, 0, 1, 157, 30, }, + { 10, 1, 0, 1, 157, 14, }, + { 11, 1, 0, 1, 157, 31, }, + { 0, 1, 0, 1, 161, 33, }, + { 2, 1, 0, 1, 161, 14, }, { 1, 1, 0, 1, 161, 63, }, { 3, 1, 0, 1, 161, 33, }, { 4, 1, 0, 1, 161, 31, }, { 5, 1, 0, 1, 161, 33, }, - { 6, 1, 0, 1, 161, 30, }, + { 6, 1, 0, 1, 161, 33, }, { 7, 1, 0, 1, 161, 27, }, + { 8, 1, 0, 1, 161, 33, }, + { 9, 1, 0, 1, 161, 30, }, + { 10, 1, 0, 1, 161, 14, }, + { 11, 1, 0, 1, 161, 31, }, + { 0, 1, 0, 1, 165, 33, }, + { 2, 1, 0, 1, 165, 14, }, { 1, 1, 0, 1, 165, 63, }, { 3, 1, 0, 1, 165, 33, }, - { 4, 1, 0, 1, 165, 63, }, + { 4, 1, 0, 1, 165, 33, }, { 5, 1, 0, 1, 165, 33, }, - { 6, 1, 0, 1, 165, 30, }, + { 6, 1, 0, 1, 165, 33, }, { 7, 1, 0, 1, 165, 27, }, + { 8, 1, 0, 1, 165, 30, }, + { 9, 1, 0, 1, 165, 30, }, + { 10, 1, 0, 1, 165, 14, }, + { 11, 1, 0, 1, 165, 31, }, + { 0, 1, 0, 2, 36, 30, }, + { 2, 1, 0, 2, 36, 32, }, { 1, 1, 0, 2, 36, 33, }, { 3, 1, 0, 2, 36, 30, }, { 4, 1, 0, 2, 36, 27, }, { 5, 1, 0, 2, 36, 32, }, { 6, 1, 0, 2, 36, 30, }, { 7, 1, 0, 2, 36, 27, }, + { 8, 1, 0, 2, 36, 30, }, + { 9, 1, 0, 2, 36, 29, }, + { 10, 1, 0, 2, 36, 63, }, + { 11, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 33, }, + { 2, 1, 0, 2, 40, 32, }, { 1, 1, 0, 2, 40, 33, }, { 3, 1, 0, 2, 40, 31, }, { 4, 1, 0, 2, 40, 29, }, { 5, 1, 0, 2, 40, 32, }, - { 6, 1, 0, 2, 40, 30, }, + { 6, 1, 0, 2, 40, 33, }, { 7, 1, 0, 2, 40, 27, }, + { 8, 1, 0, 2, 40, 31, }, + { 9, 1, 0, 2, 40, 29, }, + { 10, 1, 0, 2, 40, 63, }, + { 11, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 33, }, + { 2, 1, 0, 2, 44, 32, }, { 1, 1, 0, 2, 44, 33, }, { 3, 1, 0, 2, 44, 31, }, { 4, 1, 0, 2, 44, 29, }, { 5, 1, 0, 2, 44, 32, }, - { 6, 1, 0, 2, 44, 30, }, + { 6, 1, 0, 2, 44, 33, }, { 7, 1, 0, 2, 44, 27, }, + { 8, 1, 0, 2, 44, 31, }, + { 9, 1, 0, 2, 44, 29, }, + { 10, 1, 0, 2, 44, 63, }, + { 11, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 33, }, + { 2, 1, 0, 2, 48, 32, }, { 1, 1, 0, 2, 48, 33, }, { 3, 1, 0, 2, 48, 31, }, { 4, 1, 0, 2, 48, 26, }, { 5, 1, 0, 2, 48, 32, }, - { 6, 1, 0, 2, 48, 30, }, + { 6, 1, 0, 2, 48, 33, }, { 7, 1, 0, 2, 48, 27, }, + { 8, 1, 0, 2, 48, 31, }, + { 9, 1, 0, 2, 48, 29, }, + { 10, 1, 0, 2, 48, 63, }, + { 11, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 33, }, + { 2, 1, 0, 2, 52, 32, }, { 1, 1, 0, 2, 52, 33, }, { 3, 1, 0, 2, 52, 32, }, { 4, 1, 0, 2, 52, 7, }, { 5, 1, 0, 2, 52, 32, }, - { 6, 1, 0, 2, 52, 30, }, + { 6, 1, 0, 2, 52, 33, }, { 7, 1, 0, 2, 52, 27, }, + { 8, 1, 0, 2, 52, 33, }, + { 9, 1, 0, 2, 52, 29, }, + { 
10, 1, 0, 2, 52, 63, }, + { 11, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 33, }, + { 2, 1, 0, 2, 56, 32, }, { 1, 1, 0, 2, 56, 33, }, { 3, 1, 0, 2, 56, 32, }, { 4, 1, 0, 2, 56, 33, }, { 5, 1, 0, 2, 56, 32, }, - { 6, 1, 0, 2, 56, 30, }, + { 6, 1, 0, 2, 56, 33, }, { 7, 1, 0, 2, 56, 27, }, + { 8, 1, 0, 2, 56, 33, }, + { 9, 1, 0, 2, 56, 29, }, + { 10, 1, 0, 2, 56, 63, }, + { 11, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 33, }, + { 2, 1, 0, 2, 60, 32, }, { 1, 1, 0, 2, 60, 33, }, { 3, 1, 0, 2, 60, 32, }, { 4, 1, 0, 2, 60, 33, }, { 5, 1, 0, 2, 60, 32, }, - { 6, 1, 0, 2, 60, 30, }, + { 6, 1, 0, 2, 60, 33, }, { 7, 1, 0, 2, 60, 27, }, + { 8, 1, 0, 2, 60, 33, }, + { 9, 1, 0, 2, 60, 29, }, + { 10, 1, 0, 2, 60, 63, }, + { 11, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 30, }, + { 2, 1, 0, 2, 64, 32, }, { 1, 1, 0, 2, 64, 33, }, { 3, 1, 0, 2, 64, 30, }, { 4, 1, 0, 2, 64, 33, }, { 5, 1, 0, 2, 64, 32, }, { 6, 1, 0, 2, 64, 30, }, { 7, 1, 0, 2, 64, 27, }, + { 8, 1, 0, 2, 64, 30, }, + { 9, 1, 0, 2, 64, 29, }, + { 10, 1, 0, 2, 64, 63, }, + { 11, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 30, }, + { 2, 1, 0, 2, 100, 32, }, { 1, 1, 0, 2, 100, 33, }, { 3, 1, 0, 2, 100, 30, }, { 4, 1, 0, 2, 100, 33, }, { 5, 1, 0, 2, 100, 32, }, - { 6, 1, 0, 2, 100, 30, }, + { 6, 1, 0, 2, 100, 33, }, { 7, 1, 0, 2, 100, 27, }, + { 8, 1, 0, 2, 100, 30, }, + { 9, 1, 0, 2, 100, 63, }, + { 10, 1, 0, 2, 100, 63, }, + { 11, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 33, }, + { 2, 1, 0, 2, 104, 32, }, { 1, 1, 0, 2, 104, 33, }, { 3, 1, 0, 2, 104, 33, }, { 4, 1, 0, 2, 104, 33, }, { 5, 1, 0, 2, 104, 32, }, - { 6, 1, 0, 2, 104, 30, }, + { 6, 1, 0, 2, 104, 33, }, { 7, 1, 0, 2, 104, 27, }, + { 8, 1, 0, 2, 104, 33, }, + { 9, 1, 0, 2, 104, 63, }, + { 10, 1, 0, 2, 104, 63, }, + { 11, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 33, }, + { 2, 1, 0, 2, 108, 32, }, { 1, 1, 0, 2, 108, 33, }, { 3, 1, 0, 2, 108, 33, }, { 4, 1, 0, 2, 108, 33, }, { 5, 1, 0, 2, 108, 32, }, - { 6, 1, 0, 2, 108, 30, }, + { 6, 1, 0, 2, 108, 33, }, { 7, 1, 0, 2, 108, 27, }, + { 8, 1, 0, 2, 108, 33, }, + { 9, 1, 0, 2, 108, 63, }, + { 10, 1, 0, 2, 108, 63, }, + { 11, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 33, }, + { 2, 1, 0, 2, 112, 32, }, { 1, 1, 0, 2, 112, 33, }, { 3, 1, 0, 2, 112, 33, }, { 4, 1, 0, 2, 112, 33, }, { 5, 1, 0, 2, 112, 32, }, - { 6, 1, 0, 2, 112, 30, }, + { 6, 1, 0, 2, 112, 33, }, { 7, 1, 0, 2, 112, 27, }, + { 8, 1, 0, 2, 112, 33, }, + { 9, 1, 0, 2, 112, 63, }, + { 10, 1, 0, 2, 112, 63, }, + { 11, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 33, }, + { 2, 1, 0, 2, 116, 32, }, { 1, 1, 0, 2, 116, 33, }, { 3, 1, 0, 2, 116, 33, }, { 4, 1, 0, 2, 116, 33, }, { 5, 1, 0, 2, 116, 32, }, - { 6, 1, 0, 2, 116, 30, }, + { 6, 1, 0, 2, 116, 33, }, { 7, 1, 0, 2, 116, 27, }, + { 8, 1, 0, 2, 116, 33, }, + { 9, 1, 0, 2, 116, 63, }, + { 10, 1, 0, 2, 116, 63, }, + { 11, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 33, }, + { 2, 1, 0, 2, 120, 32, }, { 1, 1, 0, 2, 120, 33, }, { 3, 1, 0, 2, 120, 63, }, { 4, 1, 0, 2, 120, 33, }, { 5, 1, 0, 2, 120, 63, }, - { 6, 1, 0, 2, 120, 30, }, + { 6, 1, 0, 2, 120, 33, }, { 7, 1, 0, 2, 120, 27, }, + { 8, 1, 0, 2, 120, 33, }, + { 9, 1, 0, 2, 120, 63, }, + { 10, 1, 0, 2, 120, 63, }, + { 11, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 33, }, + { 2, 1, 0, 2, 124, 32, }, { 1, 1, 0, 2, 124, 33, }, { 3, 1, 0, 2, 124, 63, }, { 4, 1, 0, 2, 124, 33, }, { 5, 1, 0, 2, 124, 63, }, - { 6, 1, 0, 2, 124, 30, }, + { 6, 1, 0, 2, 124, 33, }, { 7, 1, 0, 2, 124, 27, }, + { 8, 1, 0, 2, 124, 33, }, + { 9, 1, 0, 2, 124, 63, }, + { 10, 1, 0, 2, 124, 63, }, + { 11, 1, 0, 2, 124, 32, }, + { 0, 1, 
0, 2, 128, 33, }, + { 2, 1, 0, 2, 128, 32, }, { 1, 1, 0, 2, 128, 33, }, { 3, 1, 0, 2, 128, 63, }, - { 4, 1, 0, 2, 128, 63, }, + { 4, 1, 0, 2, 128, 33, }, { 5, 1, 0, 2, 128, 63, }, - { 6, 1, 0, 2, 128, 30, }, + { 6, 1, 0, 2, 128, 33, }, { 7, 1, 0, 2, 128, 27, }, + { 8, 1, 0, 2, 128, 33, }, + { 9, 1, 0, 2, 128, 63, }, + { 10, 1, 0, 2, 128, 63, }, + { 11, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 33, }, + { 2, 1, 0, 2, 132, 32, }, { 1, 1, 0, 2, 132, 33, }, { 3, 1, 0, 2, 132, 33, }, - { 4, 1, 0, 2, 132, 63, }, + { 4, 1, 0, 2, 132, 33, }, { 5, 1, 0, 2, 132, 32, }, - { 6, 1, 0, 2, 132, 30, }, + { 6, 1, 0, 2, 132, 33, }, { 7, 1, 0, 2, 132, 27, }, + { 8, 1, 0, 2, 132, 33, }, + { 9, 1, 0, 2, 132, 63, }, + { 10, 1, 0, 2, 132, 63, }, + { 11, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 33, }, + { 2, 1, 0, 2, 136, 32, }, { 1, 1, 0, 2, 136, 33, }, { 3, 1, 0, 2, 136, 33, }, - { 4, 1, 0, 2, 136, 63, }, + { 4, 1, 0, 2, 136, 33, }, { 5, 1, 0, 2, 136, 32, }, - { 6, 1, 0, 2, 136, 30, }, - { 7, 1, 0, 2, 136, 63, }, + { 6, 1, 0, 2, 136, 33, }, + { 7, 1, 0, 2, 136, 27, }, + { 8, 1, 0, 2, 136, 33, }, + { 9, 1, 0, 2, 136, 63, }, + { 10, 1, 0, 2, 136, 63, }, + { 11, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 29, }, + { 2, 1, 0, 2, 140, 32, }, { 1, 1, 0, 2, 140, 33, }, { 3, 1, 0, 2, 140, 29, }, - { 4, 1, 0, 2, 140, 63, }, + { 4, 1, 0, 2, 140, 33, }, { 5, 1, 0, 2, 140, 32, }, - { 6, 1, 0, 2, 140, 30, }, - { 7, 1, 0, 2, 140, 63, }, + { 6, 1, 0, 2, 140, 33, }, + { 7, 1, 0, 2, 140, 27, }, + { 8, 1, 0, 2, 140, 29, }, + { 9, 1, 0, 2, 140, 63, }, + { 10, 1, 0, 2, 140, 63, }, + { 11, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 144, 27, }, + { 2, 1, 0, 2, 144, 63, }, { 1, 1, 0, 2, 144, 63, }, { 3, 1, 0, 2, 144, 27, }, - { 4, 1, 0, 2, 144, 63, }, + { 4, 1, 0, 2, 144, 33, }, { 5, 1, 0, 2, 144, 63, }, - { 6, 1, 0, 2, 144, 30, }, + { 6, 1, 0, 2, 144, 33, }, { 7, 1, 0, 2, 144, 63, }, + { 8, 1, 0, 2, 144, 27, }, + { 9, 1, 0, 2, 144, 63, }, + { 10, 1, 0, 2, 144, 63, }, + { 11, 1, 0, 2, 144, 31, }, + { 0, 1, 0, 2, 149, 33, }, + { 2, 1, 0, 2, 149, 14, }, { 1, 1, 0, 2, 149, 63, }, { 3, 1, 0, 2, 149, 33, }, { 4, 1, 0, 2, 149, 33, }, { 5, 1, 0, 2, 149, 33, }, - { 6, 1, 0, 2, 149, 30, }, + { 6, 1, 0, 2, 149, 33, }, { 7, 1, 0, 2, 149, 27, }, + { 8, 1, 0, 2, 149, 33, }, + { 9, 1, 0, 2, 149, 31, }, + { 10, 1, 0, 2, 149, 14, }, + { 11, 1, 0, 2, 149, 31, }, + { 0, 1, 0, 2, 153, 33, }, + { 2, 1, 0, 2, 153, 14, }, { 1, 1, 0, 2, 153, 63, }, { 3, 1, 0, 2, 153, 33, }, { 4, 1, 0, 2, 153, 33, }, { 5, 1, 0, 2, 153, 33, }, - { 6, 1, 0, 2, 153, 30, }, + { 6, 1, 0, 2, 153, 33, }, { 7, 1, 0, 2, 153, 27, }, + { 8, 1, 0, 2, 153, 33, }, + { 9, 1, 0, 2, 153, 31, }, + { 10, 1, 0, 2, 153, 14, }, + { 11, 1, 0, 2, 153, 31, }, + { 0, 1, 0, 2, 157, 33, }, + { 2, 1, 0, 2, 157, 14, }, { 1, 1, 0, 2, 157, 63, }, { 3, 1, 0, 2, 157, 33, }, { 4, 1, 0, 2, 157, 33, }, { 5, 1, 0, 2, 157, 33, }, - { 6, 1, 0, 2, 157, 30, }, + { 6, 1, 0, 2, 157, 33, }, { 7, 1, 0, 2, 157, 27, }, + { 8, 1, 0, 2, 157, 33, }, + { 9, 1, 0, 2, 157, 31, }, + { 10, 1, 0, 2, 157, 14, }, + { 11, 1, 0, 2, 157, 31, }, + { 0, 1, 0, 2, 161, 33, }, + { 2, 1, 0, 2, 161, 14, }, { 1, 1, 0, 2, 161, 63, }, { 3, 1, 0, 2, 161, 33, }, { 4, 1, 0, 2, 161, 31, }, { 5, 1, 0, 2, 161, 33, }, - { 6, 1, 0, 2, 161, 30, }, + { 6, 1, 0, 2, 161, 33, }, { 7, 1, 0, 2, 161, 27, }, + { 8, 1, 0, 2, 161, 33, }, + { 9, 1, 0, 2, 161, 31, }, + { 10, 1, 0, 2, 161, 14, }, + { 11, 1, 0, 2, 161, 31, }, + { 0, 1, 0, 2, 165, 33, }, + { 2, 1, 0, 2, 165, 14, }, { 1, 1, 0, 2, 165, 63, }, { 3, 1, 0, 2, 165, 33, }, - { 4, 1, 0, 2, 165, 63, }, + { 4, 1, 0, 
2, 165, 33, }, { 5, 1, 0, 2, 165, 33, }, - { 6, 1, 0, 2, 165, 30, }, + { 6, 1, 0, 2, 165, 33, }, { 7, 1, 0, 2, 165, 27, }, + { 8, 1, 0, 2, 165, 30, }, + { 9, 1, 0, 2, 165, 31, }, + { 10, 1, 0, 2, 165, 14, }, + { 11, 1, 0, 2, 165, 31, }, + { 0, 1, 1, 2, 38, 22, }, + { 2, 1, 1, 2, 38, 32, }, { 1, 1, 1, 2, 38, 32, }, { 3, 1, 1, 2, 38, 22, }, { 4, 1, 1, 2, 38, 26, }, { 5, 1, 1, 2, 38, 32, }, { 6, 1, 1, 2, 38, 22, }, { 7, 1, 1, 2, 38, 27, }, + { 8, 1, 1, 2, 38, 22, }, + { 9, 1, 1, 2, 38, 29, }, + { 10, 1, 1, 2, 38, 63, }, + { 11, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 32, }, { 1, 1, 1, 2, 46, 32, }, { 3, 1, 1, 2, 46, 32, }, { 4, 1, 1, 2, 46, 28, }, { 5, 1, 1, 2, 46, 32, }, - { 6, 1, 1, 2, 46, 30, }, + { 6, 1, 1, 2, 46, 32, }, { 7, 1, 1, 2, 46, 27, }, + { 8, 1, 1, 2, 46, 31, }, + { 9, 1, 1, 2, 46, 29, }, + { 10, 1, 1, 2, 46, 63, }, + { 11, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, { 1, 1, 1, 2, 54, 32, }, { 3, 1, 1, 2, 54, 32, }, { 4, 1, 1, 2, 54, 22, }, { 5, 1, 1, 2, 54, 32, }, - { 6, 1, 1, 2, 54, 30, }, + { 6, 1, 1, 2, 54, 32, }, { 7, 1, 1, 2, 54, 27, }, + { 8, 1, 1, 2, 54, 32, }, + { 9, 1, 1, 2, 54, 28, }, + { 10, 1, 1, 2, 54, 63, }, + { 11, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 23, }, + { 2, 1, 1, 2, 62, 32, }, { 1, 1, 1, 2, 62, 32, }, { 3, 1, 1, 2, 62, 23, }, { 4, 1, 1, 2, 62, 31, }, { 5, 1, 1, 2, 62, 32, }, { 6, 1, 1, 2, 62, 23, }, { 7, 1, 1, 2, 62, 27, }, + { 8, 1, 1, 2, 62, 23, }, + { 9, 1, 1, 2, 62, 28, }, + { 10, 1, 1, 2, 62, 63, }, + { 11, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 21, }, + { 2, 1, 1, 2, 102, 32, }, { 1, 1, 1, 2, 102, 32, }, { 3, 1, 1, 2, 102, 21, }, { 4, 1, 1, 2, 102, 31, }, { 5, 1, 1, 2, 102, 32, }, - { 6, 1, 1, 2, 102, 30, }, + { 6, 1, 1, 2, 102, 32, }, { 7, 1, 1, 2, 102, 27, }, + { 8, 1, 1, 2, 102, 21, }, + { 9, 1, 1, 2, 102, 63, }, + { 10, 1, 1, 2, 102, 63, }, + { 11, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, { 1, 1, 1, 2, 110, 32, }, { 3, 1, 1, 2, 110, 32, }, { 4, 1, 1, 2, 110, 32, }, { 5, 1, 1, 2, 110, 32, }, - { 6, 1, 1, 2, 110, 30, }, + { 6, 1, 1, 2, 110, 32, }, { 7, 1, 1, 2, 110, 27, }, + { 8, 1, 1, 2, 110, 32, }, + { 9, 1, 1, 2, 110, 63, }, + { 10, 1, 1, 2, 110, 63, }, + { 11, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 32, }, { 1, 1, 1, 2, 118, 32, }, { 3, 1, 1, 2, 118, 63, }, { 4, 1, 1, 2, 118, 32, }, { 5, 1, 1, 2, 118, 63, }, - { 6, 1, 1, 2, 118, 30, }, + { 6, 1, 1, 2, 118, 32, }, { 7, 1, 1, 2, 118, 27, }, + { 8, 1, 1, 2, 118, 32, }, + { 9, 1, 1, 2, 118, 63, }, + { 10, 1, 1, 2, 118, 63, }, + { 11, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 32, }, { 1, 1, 1, 2, 126, 32, }, { 3, 1, 1, 2, 126, 63, }, - { 4, 1, 1, 2, 126, 63, }, + { 4, 1, 1, 2, 126, 32, }, { 5, 1, 1, 2, 126, 63, }, - { 6, 1, 1, 2, 126, 30, }, + { 6, 1, 1, 2, 126, 32, }, { 7, 1, 1, 2, 126, 27, }, + { 8, 1, 1, 2, 126, 32, }, + { 9, 1, 1, 2, 126, 63, }, + { 10, 1, 1, 2, 126, 63, }, + { 11, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 32, }, { 1, 1, 1, 2, 134, 32, }, { 3, 1, 1, 2, 134, 32, }, - { 4, 1, 1, 2, 134, 63, }, + { 4, 1, 1, 2, 134, 32, }, { 5, 1, 1, 2, 134, 32, }, - { 6, 1, 1, 2, 134, 30, }, - { 7, 1, 1, 2, 134, 63, }, + { 6, 1, 1, 2, 134, 32, }, + { 7, 1, 1, 2, 134, 27, }, + { 8, 1, 1, 2, 134, 32, }, + { 9, 1, 1, 2, 134, 63, }, + { 10, 1, 1, 2, 134, 63, }, + { 11, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 142, 29, }, + { 2, 1, 1, 2, 142, 63, }, { 1, 1, 1, 2, 142, 63, }, { 3, 1, 1, 2, 142, 29, }, - { 4, 1, 1, 2, 142, 63, 
}, + { 4, 1, 1, 2, 142, 32, }, { 5, 1, 1, 2, 142, 63, }, - { 6, 1, 1, 2, 142, 30, }, + { 6, 1, 1, 2, 142, 32, }, { 7, 1, 1, 2, 142, 63, }, + { 8, 1, 1, 2, 142, 29, }, + { 9, 1, 1, 2, 142, 63, }, + { 10, 1, 1, 2, 142, 63, }, + { 11, 1, 1, 2, 142, 31, }, + { 0, 1, 1, 2, 151, 32, }, + { 2, 1, 1, 2, 151, 14, }, { 1, 1, 1, 2, 151, 63, }, { 3, 1, 1, 2, 151, 32, }, { 4, 1, 1, 2, 151, 27, }, { 5, 1, 1, 2, 151, 32, }, - { 6, 1, 1, 2, 151, 30, }, + { 6, 1, 1, 2, 151, 32, }, { 7, 1, 1, 2, 151, 27, }, + { 8, 1, 1, 2, 151, 32, }, + { 9, 1, 1, 2, 151, 27, }, + { 10, 1, 1, 2, 151, 14, }, + { 11, 1, 1, 2, 151, 30, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 14, }, { 1, 1, 1, 2, 159, 63, }, { 3, 1, 1, 2, 159, 32, }, { 4, 1, 1, 2, 159, 26, }, { 5, 1, 1, 2, 159, 32, }, - { 6, 1, 1, 2, 159, 30, }, + { 6, 1, 1, 2, 159, 32, }, { 7, 1, 1, 2, 159, 27, }, + { 8, 1, 1, 2, 159, 32, }, + { 9, 1, 1, 2, 159, 31, }, + { 10, 1, 1, 2, 159, 14, }, + { 11, 1, 1, 2, 159, 30, }, + { 0, 1, 2, 4, 42, 19, }, + { 2, 1, 2, 4, 42, 32, }, { 1, 1, 2, 4, 42, 28, }, { 3, 1, 2, 4, 42, 19, }, { 4, 1, 2, 4, 42, 25, }, { 5, 1, 2, 4, 42, 32, }, { 6, 1, 2, 4, 42, 19, }, { 7, 1, 2, 4, 42, 27, }, + { 8, 1, 2, 4, 42, 19, }, + { 9, 1, 2, 4, 42, 25, }, + { 10, 1, 2, 4, 42, 63, }, + { 11, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 22, }, + { 2, 1, 2, 4, 58, 32, }, { 1, 1, 2, 4, 58, 28, }, { 3, 1, 2, 4, 58, 22, }, { 4, 1, 2, 4, 58, 28, }, { 5, 1, 2, 4, 58, 32, }, { 6, 1, 2, 4, 58, 22, }, { 7, 1, 2, 4, 58, 27, }, + { 8, 1, 2, 4, 58, 22, }, + { 9, 1, 2, 4, 58, 23, }, + { 10, 1, 2, 4, 58, 63, }, + { 11, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 18, }, + { 2, 1, 2, 4, 106, 32, }, { 1, 1, 2, 4, 106, 32, }, { 3, 1, 2, 4, 106, 18, }, { 4, 1, 2, 4, 106, 30, }, { 5, 1, 2, 4, 106, 32, }, - { 6, 1, 2, 4, 106, 30, }, + { 6, 1, 2, 4, 106, 32, }, { 7, 1, 2, 4, 106, 27, }, + { 8, 1, 2, 4, 106, 18, }, + { 9, 1, 2, 4, 106, 63, }, + { 10, 1, 2, 4, 106, 63, }, + { 11, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 32, }, + { 2, 1, 2, 4, 122, 32, }, { 1, 1, 2, 4, 122, 32, }, { 3, 1, 2, 4, 122, 63, }, { 4, 1, 2, 4, 122, 26, }, { 5, 1, 2, 4, 122, 63, }, - { 6, 1, 2, 4, 122, 30, }, + { 6, 1, 2, 4, 122, 32, }, { 7, 1, 2, 4, 122, 27, }, + { 8, 1, 2, 4, 122, 32, }, + { 9, 1, 2, 4, 122, 63, }, + { 10, 1, 2, 4, 122, 63, }, + { 11, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 138, 28, }, + { 2, 1, 2, 4, 138, 63, }, { 1, 1, 2, 4, 138, 63, }, { 3, 1, 2, 4, 138, 28, }, - { 4, 1, 2, 4, 138, 63, }, + { 4, 1, 2, 4, 138, 32, }, { 5, 1, 2, 4, 138, 63, }, - { 6, 1, 2, 4, 138, 30, }, + { 6, 1, 2, 4, 138, 32, }, { 7, 1, 2, 4, 138, 63, }, + { 8, 1, 2, 4, 138, 28, }, + { 9, 1, 2, 4, 138, 63, }, + { 10, 1, 2, 4, 138, 63, }, + { 11, 1, 2, 4, 138, 30, }, + { 0, 1, 2, 4, 155, 32, }, + { 2, 1, 2, 4, 155, 14, }, { 1, 1, 2, 4, 155, 63, }, { 3, 1, 2, 4, 155, 32, }, { 4, 1, 2, 4, 155, 27, }, { 5, 1, 2, 4, 155, 32, }, - { 6, 1, 2, 4, 155, 30, }, + { 6, 1, 2, 4, 155, 32, }, { 7, 1, 2, 4, 155, 27, }, + { 8, 1, 2, 4, 155, 32, }, + { 9, 1, 2, 4, 155, 20, }, + { 10, 1, 2, 4, 155, 14, }, + { 11, 1, 2, 4, 155, 30, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8821c_txpwr_lmt_type0); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c index f9e3d0779c..5699846a39 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c @@ -39832,6 +39832,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 1, 60, }, { 8, 0, 0, 0, 1, 72, }, { 9, 0, 0, 0, 1, 60, }, + { 10, 0, 
0, 0, 1, 60, }, + { 11, 0, 0, 0, 1, 60, }, { 0, 0, 0, 0, 2, 72, }, { 2, 0, 0, 0, 2, 60, }, { 1, 0, 0, 0, 2, 68, }, @@ -39842,6 +39844,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 2, 60, }, { 8, 0, 0, 0, 2, 72, }, { 9, 0, 0, 0, 2, 60, }, + { 10, 0, 0, 0, 2, 60, }, + { 11, 0, 0, 0, 2, 60, }, { 0, 0, 0, 0, 3, 76, }, { 2, 0, 0, 0, 3, 60, }, { 1, 0, 0, 0, 3, 68, }, @@ -39852,6 +39856,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 3, 60, }, { 8, 0, 0, 0, 3, 76, }, { 9, 0, 0, 0, 3, 60, }, + { 10, 0, 0, 0, 3, 60, }, + { 11, 0, 0, 0, 3, 60, }, { 0, 0, 0, 0, 4, 76, }, { 2, 0, 0, 0, 4, 60, }, { 1, 0, 0, 0, 4, 68, }, @@ -39862,6 +39868,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 4, 60, }, { 8, 0, 0, 0, 4, 76, }, { 9, 0, 0, 0, 4, 60, }, + { 10, 0, 0, 0, 4, 60, }, + { 11, 0, 0, 0, 4, 60, }, { 0, 0, 0, 0, 5, 76, }, { 2, 0, 0, 0, 5, 60, }, { 1, 0, 0, 0, 5, 68, }, @@ -39872,6 +39880,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 5, 60, }, { 8, 0, 0, 0, 5, 76, }, { 9, 0, 0, 0, 5, 60, }, + { 10, 0, 0, 0, 5, 60, }, + { 11, 0, 0, 0, 5, 60, }, { 0, 0, 0, 0, 6, 76, }, { 2, 0, 0, 0, 6, 60, }, { 1, 0, 0, 0, 6, 68, }, @@ -39882,6 +39892,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 6, 60, }, { 8, 0, 0, 0, 6, 76, }, { 9, 0, 0, 0, 6, 60, }, + { 10, 0, 0, 0, 6, 60, }, + { 11, 0, 0, 0, 6, 60, }, { 0, 0, 0, 0, 7, 76, }, { 2, 0, 0, 0, 7, 60, }, { 1, 0, 0, 0, 7, 68, }, @@ -39892,6 +39904,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 7, 60, }, { 8, 0, 0, 0, 7, 76, }, { 9, 0, 0, 0, 7, 60, }, + { 10, 0, 0, 0, 7, 60, }, + { 11, 0, 0, 0, 7, 60, }, { 0, 0, 0, 0, 8, 76, }, { 2, 0, 0, 0, 8, 60, }, { 1, 0, 0, 0, 8, 68, }, @@ -39902,6 +39916,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 8, 60, }, { 8, 0, 0, 0, 8, 76, }, { 9, 0, 0, 0, 8, 60, }, + { 10, 0, 0, 0, 8, 60, }, + { 11, 0, 0, 0, 8, 60, }, { 0, 0, 0, 0, 9, 76, }, { 2, 0, 0, 0, 9, 60, }, { 1, 0, 0, 0, 9, 68, }, @@ -39912,6 +39928,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 9, 60, }, { 8, 0, 0, 0, 9, 76, }, { 9, 0, 0, 0, 9, 60, }, + { 10, 0, 0, 0, 9, 60, }, + { 11, 0, 0, 0, 9, 60, }, { 0, 0, 0, 0, 10, 72, }, { 2, 0, 0, 0, 10, 60, }, { 1, 0, 0, 0, 10, 68, }, @@ -39922,6 +39940,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 10, 60, }, { 8, 0, 0, 0, 10, 72, }, { 9, 0, 0, 0, 10, 60, }, + { 10, 0, 0, 0, 10, 60, }, + { 11, 0, 0, 0, 10, 60, }, { 0, 0, 0, 0, 11, 72, }, { 2, 0, 0, 0, 11, 60, }, { 1, 0, 0, 0, 11, 68, }, @@ -39932,7 +39952,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 11, 60, }, { 8, 0, 0, 0, 11, 72, }, { 9, 0, 0, 0, 11, 60, }, - { 0, 0, 0, 0, 12, 44, }, + { 10, 0, 0, 0, 11, 60, }, + { 11, 0, 0, 0, 11, 60, }, + { 0, 0, 0, 0, 12, 52, }, { 2, 0, 0, 0, 12, 60, }, { 1, 0, 0, 0, 12, 68, }, { 3, 0, 0, 0, 12, 52, }, @@ -39942,7 +39964,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 12, 60, }, { 8, 0, 0, 0, 12, 52, }, { 9, 0, 0, 0, 12, 60, }, - { 0, 0, 0, 0, 13, 40, }, + { 10, 0, 0, 0, 12, 60, }, + { 11, 0, 0, 0, 12, 60, }, + { 0, 0, 0, 0, 13, 48, }, { 2, 0, 0, 0, 13, 60, }, { 1, 0, 0, 0, 13, 68, }, { 3, 0, 0, 0, 13, 48, }, @@ -39952,6 +39976,8 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 13, 60, }, { 8, 0, 0, 0, 13, 48, }, { 9, 0, 0, 0, 13, 60, }, + { 10, 0, 0, 0, 13, 60, }, + { 11, 0, 0, 0, 13, 60, }, { 0, 0, 0, 0, 14, 127, }, { 2, 0, 0, 0, 14, 127, }, { 1, 0, 0, 0, 14, 68, }, @@ -39962,6 +39988,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 0, 14, 127, }, { 8, 0, 0, 0, 14, 127, }, { 9, 0, 0, 0, 14, 127, }, + { 10, 0, 0, 0, 14, 127, }, + { 11, 0, 0, 0, 14, 127, }, { 0, 0, 0, 1, 1, 52, }, { 2, 0, 0, 1, 1, 60, }, { 1, 0, 0, 1, 1, 76, }, @@ -39972,6 +40000,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 1, 60, }, { 8, 0, 0, 1, 1, 52, }, { 9, 0, 0, 1, 1, 60, }, + { 10, 0, 0, 1, 1, 60, }, + { 11, 0, 0, 1, 1, 60, }, { 0, 0, 0, 1, 2, 60, }, { 2, 0, 0, 1, 2, 60, }, { 1, 0, 0, 1, 2, 76, }, @@ -39982,6 +40012,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 2, 60, }, { 8, 0, 0, 1, 2, 60, }, { 9, 0, 0, 1, 2, 60, }, + { 10, 0, 0, 1, 2, 60, }, + { 11, 0, 0, 1, 2, 60, }, { 0, 0, 0, 1, 3, 64, }, { 2, 0, 0, 1, 3, 60, }, { 1, 0, 0, 1, 3, 76, }, @@ -39992,6 +40024,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 3, 60, }, { 8, 0, 0, 1, 3, 64, }, { 9, 0, 0, 1, 3, 60, }, + { 10, 0, 0, 1, 3, 60, }, + { 11, 0, 0, 1, 3, 60, }, { 0, 0, 0, 1, 4, 68, }, { 2, 0, 0, 1, 4, 60, }, { 1, 0, 0, 1, 4, 76, }, @@ -40002,6 +40036,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 4, 60, }, { 8, 0, 0, 1, 4, 68, }, { 9, 0, 0, 1, 4, 60, }, + { 10, 0, 0, 1, 4, 60, }, + { 11, 0, 0, 1, 4, 60, }, { 0, 0, 0, 1, 5, 76, }, { 2, 0, 0, 1, 5, 60, }, { 1, 0, 0, 1, 5, 76, }, @@ -40012,6 +40048,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 5, 60, }, { 8, 0, 0, 1, 5, 76, }, { 9, 0, 0, 1, 5, 60, }, + { 10, 0, 0, 1, 5, 60, }, + { 11, 0, 0, 1, 5, 60, }, { 0, 0, 0, 1, 6, 76, }, { 2, 0, 0, 1, 6, 60, }, { 1, 0, 0, 1, 6, 76, }, @@ -40022,6 +40060,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 6, 60, }, { 8, 0, 0, 1, 6, 76, }, { 9, 0, 0, 1, 6, 60, }, + { 10, 0, 0, 1, 6, 60, }, + { 11, 0, 0, 1, 6, 60, }, { 0, 0, 0, 1, 7, 76, }, { 2, 0, 0, 1, 7, 60, }, { 1, 0, 0, 1, 7, 76, }, @@ -40032,6 +40072,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 7, 60, }, { 8, 0, 0, 1, 7, 76, }, { 9, 0, 0, 1, 7, 60, }, + { 10, 0, 0, 1, 7, 60, }, + { 11, 0, 0, 1, 7, 60, }, { 0, 0, 0, 1, 8, 68, }, { 2, 0, 0, 1, 8, 60, }, { 1, 0, 0, 1, 8, 76, }, @@ -40042,6 +40084,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 8, 60, }, { 8, 0, 0, 1, 8, 68, }, { 9, 0, 0, 1, 8, 60, }, + { 10, 0, 0, 1, 8, 60, }, + { 11, 0, 0, 1, 8, 60, }, { 0, 0, 0, 1, 9, 64, }, { 2, 0, 0, 1, 9, 60, }, { 1, 0, 0, 1, 9, 76, }, @@ -40052,6 +40096,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 9, 60, }, { 8, 0, 0, 1, 9, 64, }, { 9, 0, 0, 1, 9, 60, }, + { 10, 0, 0, 1, 9, 60, }, + { 11, 0, 0, 1, 9, 60, }, { 0, 0, 0, 1, 10, 60, }, { 2, 0, 0, 1, 10, 60, }, { 1, 0, 0, 1, 10, 76, }, @@ -40062,6 +40108,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 10, 60, }, { 8, 0, 0, 1, 10, 60, }, { 9, 0, 0, 1, 10, 60, }, + { 10, 0, 0, 1, 10, 60, }, + { 11, 0, 0, 1, 10, 60, }, { 0, 0, 0, 1, 11, 52, }, { 2, 0, 0, 1, 11, 60, }, { 1, 0, 0, 1, 11, 76, }, @@ -40071,8 
+40119,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 1, 11, 52, }, { 7, 0, 0, 1, 11, 60, }, { 8, 0, 0, 1, 11, 52, }, - { 9, 0, 0, 1, 11, 60, }, - { 0, 0, 0, 1, 12, 32, }, + { 9, 0, 0, 1, 11, 44, }, + { 10, 0, 0, 1, 11, 60, }, + { 11, 0, 0, 1, 11, 60, }, + { 0, 0, 0, 1, 12, 40, }, { 2, 0, 0, 1, 12, 60, }, { 1, 0, 0, 1, 12, 76, }, { 3, 0, 0, 1, 12, 40, }, @@ -40081,8 +40131,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 1, 12, 40, }, { 7, 0, 0, 1, 12, 60, }, { 8, 0, 0, 1, 12, 40, }, - { 9, 0, 0, 1, 12, 60, }, - { 0, 0, 0, 1, 13, 20, }, + { 9, 0, 0, 1, 12, 44, }, + { 10, 0, 0, 1, 12, 60, }, + { 11, 0, 0, 1, 12, 60, }, + { 0, 0, 0, 1, 13, 28, }, { 2, 0, 0, 1, 13, 60, }, { 1, 0, 0, 1, 13, 76, }, { 3, 0, 0, 1, 13, 28, }, @@ -40091,7 +40143,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 1, 13, 28, }, { 7, 0, 0, 1, 13, 60, }, { 8, 0, 0, 1, 13, 28, }, - { 9, 0, 0, 1, 13, 60, }, + { 9, 0, 0, 1, 13, 36, }, + { 10, 0, 0, 1, 13, 60, }, + { 11, 0, 0, 1, 13, 60, }, { 0, 0, 0, 1, 14, 127, }, { 2, 0, 0, 1, 14, 127, }, { 1, 0, 0, 1, 14, 127, }, @@ -40102,6 +40156,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 1, 14, 127, }, { 8, 0, 0, 1, 14, 127, }, { 9, 0, 0, 1, 14, 127, }, + { 10, 0, 0, 1, 14, 127, }, + { 11, 0, 0, 1, 14, 127, }, { 0, 0, 0, 2, 1, 52, }, { 2, 0, 0, 2, 1, 60, }, { 1, 0, 0, 2, 1, 76, }, @@ -40112,6 +40168,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 1, 60, }, { 8, 0, 0, 2, 1, 52, }, { 9, 0, 0, 2, 1, 60, }, + { 10, 0, 0, 2, 1, 60, }, + { 11, 0, 0, 2, 1, 60, }, { 0, 0, 0, 2, 2, 60, }, { 2, 0, 0, 2, 2, 60, }, { 1, 0, 0, 2, 2, 76, }, @@ -40122,6 +40180,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 2, 60, }, { 8, 0, 0, 2, 2, 60, }, { 9, 0, 0, 2, 2, 60, }, + { 10, 0, 0, 2, 2, 60, }, + { 11, 0, 0, 2, 2, 60, }, { 0, 0, 0, 2, 3, 64, }, { 2, 0, 0, 2, 3, 60, }, { 1, 0, 0, 2, 3, 76, }, @@ -40132,6 +40192,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 3, 60, }, { 8, 0, 0, 2, 3, 64, }, { 9, 0, 0, 2, 3, 60, }, + { 10, 0, 0, 2, 3, 60, }, + { 11, 0, 0, 2, 3, 60, }, { 0, 0, 0, 2, 4, 68, }, { 2, 0, 0, 2, 4, 60, }, { 1, 0, 0, 2, 4, 76, }, @@ -40142,6 +40204,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 4, 60, }, { 8, 0, 0, 2, 4, 68, }, { 9, 0, 0, 2, 4, 60, }, + { 10, 0, 0, 2, 4, 60, }, + { 11, 0, 0, 2, 4, 60, }, { 0, 0, 0, 2, 5, 76, }, { 2, 0, 0, 2, 5, 60, }, { 1, 0, 0, 2, 5, 76, }, @@ -40152,6 +40216,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 5, 60, }, { 8, 0, 0, 2, 5, 76, }, { 9, 0, 0, 2, 5, 60, }, + { 10, 0, 0, 2, 5, 60, }, + { 11, 0, 0, 2, 5, 60, }, { 0, 0, 0, 2, 6, 76, }, { 2, 0, 0, 2, 6, 60, }, { 1, 0, 0, 2, 6, 76, }, @@ -40162,6 +40228,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 6, 60, }, { 8, 0, 0, 2, 6, 76, }, { 9, 0, 0, 2, 6, 60, }, + { 10, 0, 0, 2, 6, 60, }, + { 11, 0, 0, 2, 6, 60, }, { 0, 0, 0, 2, 7, 76, }, { 2, 0, 0, 2, 7, 60, }, { 1, 0, 0, 2, 7, 76, }, @@ -40172,6 +40240,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 7, 60, }, { 8, 0, 0, 2, 7, 76, }, { 9, 0, 0, 2, 7, 60, }, + { 10, 0, 0, 2, 7, 60, }, + { 11, 0, 0, 2, 7, 60, }, { 0, 0, 0, 2, 8, 68, }, { 2, 0, 0, 2, 8, 60, }, { 1, 0, 0, 2, 8, 76, }, 
@@ -40182,6 +40252,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 8, 60, }, { 8, 0, 0, 2, 8, 68, }, { 9, 0, 0, 2, 8, 60, }, + { 10, 0, 0, 2, 8, 60, }, + { 11, 0, 0, 2, 8, 60, }, { 0, 0, 0, 2, 9, 64, }, { 2, 0, 0, 2, 9, 60, }, { 1, 0, 0, 2, 9, 76, }, @@ -40192,6 +40264,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 9, 60, }, { 8, 0, 0, 2, 9, 64, }, { 9, 0, 0, 2, 9, 60, }, + { 10, 0, 0, 2, 9, 60, }, + { 11, 0, 0, 2, 9, 60, }, { 0, 0, 0, 2, 10, 60, }, { 2, 0, 0, 2, 10, 60, }, { 1, 0, 0, 2, 10, 76, }, @@ -40202,6 +40276,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 10, 60, }, { 8, 0, 0, 2, 10, 60, }, { 9, 0, 0, 2, 10, 60, }, + { 10, 0, 0, 2, 10, 60, }, + { 11, 0, 0, 2, 10, 60, }, { 0, 0, 0, 2, 11, 52, }, { 2, 0, 0, 2, 11, 60, }, { 1, 0, 0, 2, 11, 76, }, @@ -40211,8 +40287,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 2, 11, 52, }, { 7, 0, 0, 2, 11, 60, }, { 8, 0, 0, 2, 11, 52, }, - { 9, 0, 0, 2, 11, 60, }, - { 0, 0, 0, 2, 12, 32, }, + { 9, 0, 0, 2, 11, 46, }, + { 10, 0, 0, 2, 11, 60, }, + { 11, 0, 0, 2, 11, 60, }, + { 0, 0, 0, 2, 12, 40, }, { 2, 0, 0, 2, 12, 60, }, { 1, 0, 0, 2, 12, 76, }, { 3, 0, 0, 2, 12, 40, }, @@ -40221,8 +40299,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 2, 12, 40, }, { 7, 0, 0, 2, 12, 60, }, { 8, 0, 0, 2, 12, 40, }, - { 9, 0, 0, 2, 12, 60, }, - { 0, 0, 0, 2, 13, 20, }, + { 9, 0, 0, 2, 12, 42, }, + { 10, 0, 0, 2, 12, 60, }, + { 11, 0, 0, 2, 12, 60, }, + { 0, 0, 0, 2, 13, 28, }, { 2, 0, 0, 2, 13, 60, }, { 1, 0, 0, 2, 13, 76, }, { 3, 0, 0, 2, 13, 28, }, @@ -40231,7 +40311,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 2, 13, 28, }, { 7, 0, 0, 2, 13, 60, }, { 8, 0, 0, 2, 13, 28, }, - { 9, 0, 0, 2, 13, 60, }, + { 9, 0, 0, 2, 13, 34, }, + { 10, 0, 0, 2, 13, 60, }, + { 11, 0, 0, 2, 13, 60, }, { 0, 0, 0, 2, 14, 127, }, { 2, 0, 0, 2, 14, 127, }, { 1, 0, 0, 2, 14, 127, }, @@ -40242,6 +40324,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 2, 14, 127, }, { 8, 0, 0, 2, 14, 127, }, { 9, 0, 0, 2, 14, 127, }, + { 10, 0, 0, 2, 14, 127, }, + { 11, 0, 0, 2, 14, 127, }, { 0, 0, 0, 3, 1, 52, }, { 2, 0, 0, 3, 1, 36, }, { 1, 0, 0, 3, 1, 66, }, @@ -40252,6 +40336,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 1, 36, }, { 8, 0, 0, 3, 1, 52, }, { 9, 0, 0, 3, 1, 36, }, + { 10, 0, 0, 3, 1, 36, }, + { 11, 0, 0, 3, 1, 36, }, { 0, 0, 0, 3, 2, 60, }, { 2, 0, 0, 3, 2, 36, }, { 1, 0, 0, 3, 2, 66, }, @@ -40262,6 +40348,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 2, 36, }, { 8, 0, 0, 3, 2, 60, }, { 9, 0, 0, 3, 2, 36, }, + { 10, 0, 0, 3, 2, 36, }, + { 11, 0, 0, 3, 2, 36, }, { 0, 0, 0, 3, 3, 64, }, { 2, 0, 0, 3, 3, 36, }, { 1, 0, 0, 3, 3, 66, }, @@ -40272,6 +40360,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 3, 36, }, { 8, 0, 0, 3, 3, 64, }, { 9, 0, 0, 3, 3, 36, }, + { 10, 0, 0, 3, 3, 36, }, + { 11, 0, 0, 3, 3, 36, }, { 0, 0, 0, 3, 4, 68, }, { 2, 0, 0, 3, 4, 36, }, { 1, 0, 0, 3, 4, 66, }, @@ -40282,6 +40372,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 4, 36, }, { 8, 0, 0, 3, 4, 68, }, { 9, 0, 0, 3, 4, 36, }, + { 10, 0, 0, 3, 4, 36, }, + { 11, 0, 0, 3, 4, 36, }, { 0, 0, 0, 3, 5, 76, }, { 2, 0, 0, 3, 5, 36, }, { 
1, 0, 0, 3, 5, 66, }, @@ -40292,6 +40384,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 5, 36, }, { 8, 0, 0, 3, 5, 76, }, { 9, 0, 0, 3, 5, 36, }, + { 10, 0, 0, 3, 5, 36, }, + { 11, 0, 0, 3, 5, 36, }, { 0, 0, 0, 3, 6, 76, }, { 2, 0, 0, 3, 6, 36, }, { 1, 0, 0, 3, 6, 66, }, @@ -40302,6 +40396,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 6, 36, }, { 8, 0, 0, 3, 6, 76, }, { 9, 0, 0, 3, 6, 36, }, + { 10, 0, 0, 3, 6, 36, }, + { 11, 0, 0, 3, 6, 36, }, { 0, 0, 0, 3, 7, 76, }, { 2, 0, 0, 3, 7, 36, }, { 1, 0, 0, 3, 7, 66, }, @@ -40312,6 +40408,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 7, 36, }, { 8, 0, 0, 3, 7, 76, }, { 9, 0, 0, 3, 7, 36, }, + { 10, 0, 0, 3, 7, 36, }, + { 11, 0, 0, 3, 7, 36, }, { 0, 0, 0, 3, 8, 68, }, { 2, 0, 0, 3, 8, 36, }, { 1, 0, 0, 3, 8, 66, }, @@ -40322,6 +40420,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 8, 36, }, { 8, 0, 0, 3, 8, 68, }, { 9, 0, 0, 3, 8, 36, }, + { 10, 0, 0, 3, 8, 36, }, + { 11, 0, 0, 3, 8, 36, }, { 0, 0, 0, 3, 9, 64, }, { 2, 0, 0, 3, 9, 36, }, { 1, 0, 0, 3, 9, 66, }, @@ -40332,6 +40432,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 9, 36, }, { 8, 0, 0, 3, 9, 64, }, { 9, 0, 0, 3, 9, 36, }, + { 10, 0, 0, 3, 9, 36, }, + { 11, 0, 0, 3, 9, 36, }, { 0, 0, 0, 3, 10, 60, }, { 2, 0, 0, 3, 10, 36, }, { 1, 0, 0, 3, 10, 66, }, @@ -40342,6 +40444,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 10, 36, }, { 8, 0, 0, 3, 10, 60, }, { 9, 0, 0, 3, 10, 36, }, + { 10, 0, 0, 3, 10, 36, }, + { 11, 0, 0, 3, 10, 36, }, { 0, 0, 0, 3, 11, 52, }, { 2, 0, 0, 3, 11, 36, }, { 1, 0, 0, 3, 11, 66, }, @@ -40352,7 +40456,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 11, 36, }, { 8, 0, 0, 3, 11, 52, }, { 9, 0, 0, 3, 11, 36, }, - { 0, 0, 0, 3, 12, 32, }, + { 10, 0, 0, 3, 11, 36, }, + { 11, 0, 0, 3, 11, 36, }, + { 0, 0, 0, 3, 12, 40, }, { 2, 0, 0, 3, 12, 36, }, { 1, 0, 0, 3, 12, 66, }, { 3, 0, 0, 3, 12, 40, }, @@ -40362,7 +40468,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 12, 36, }, { 8, 0, 0, 3, 12, 40, }, { 9, 0, 0, 3, 12, 36, }, - { 0, 0, 0, 3, 13, 20, }, + { 10, 0, 0, 3, 12, 36, }, + { 11, 0, 0, 3, 12, 36, }, + { 0, 0, 0, 3, 13, 28, }, { 2, 0, 0, 3, 13, 36, }, { 1, 0, 0, 3, 13, 66, }, { 3, 0, 0, 3, 13, 28, }, @@ -40371,7 +40479,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 0, 3, 13, 28, }, { 7, 0, 0, 3, 13, 36, }, { 8, 0, 0, 3, 13, 28, }, - { 9, 0, 0, 3, 13, 36, }, + { 9, 0, 0, 3, 13, 34, }, + { 10, 0, 0, 3, 13, 36, }, + { 11, 0, 0, 3, 13, 36, }, { 0, 0, 0, 3, 14, 127, }, { 2, 0, 0, 3, 14, 127, }, { 1, 0, 0, 3, 14, 127, }, @@ -40382,6 +40492,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 0, 3, 14, 127, }, { 8, 0, 0, 3, 14, 127, }, { 9, 0, 0, 3, 14, 127, }, + { 10, 0, 0, 3, 14, 127, }, + { 11, 0, 0, 3, 14, 127, }, { 0, 0, 1, 2, 1, 127, }, { 2, 0, 1, 2, 1, 127, }, { 1, 0, 1, 2, 1, 127, }, @@ -40392,6 +40504,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 1, 127, }, { 8, 0, 1, 2, 1, 127, }, { 9, 0, 1, 2, 1, 127, }, + { 10, 0, 1, 2, 1, 127, }, + { 11, 0, 1, 2, 1, 127, }, { 0, 0, 1, 2, 2, 127, }, { 2, 0, 1, 2, 2, 127, }, { 1, 0, 1, 2, 2, 127, }, @@ -40402,6 +40516,8 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 2, 127, }, { 8, 0, 1, 2, 2, 127, }, { 9, 0, 1, 2, 2, 127, }, + { 10, 0, 1, 2, 2, 127, }, + { 11, 0, 1, 2, 2, 127, }, { 0, 0, 1, 2, 3, 52, }, { 2, 0, 1, 2, 3, 60, }, { 1, 0, 1, 2, 3, 72, }, @@ -40412,6 +40528,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 3, 60, }, { 8, 0, 1, 2, 3, 52, }, { 9, 0, 1, 2, 3, 60, }, + { 10, 0, 1, 2, 3, 60, }, + { 11, 0, 1, 2, 3, 60, }, { 0, 0, 1, 2, 4, 52, }, { 2, 0, 1, 2, 4, 60, }, { 1, 0, 1, 2, 4, 72, }, @@ -40422,6 +40540,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 4, 60, }, { 8, 0, 1, 2, 4, 52, }, { 9, 0, 1, 2, 4, 60, }, + { 10, 0, 1, 2, 4, 60, }, + { 11, 0, 1, 2, 4, 60, }, { 0, 0, 1, 2, 5, 60, }, { 2, 0, 1, 2, 5, 60, }, { 1, 0, 1, 2, 5, 72, }, @@ -40432,6 +40552,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 5, 60, }, { 8, 0, 1, 2, 5, 60, }, { 9, 0, 1, 2, 5, 60, }, + { 10, 0, 1, 2, 5, 60, }, + { 11, 0, 1, 2, 5, 60, }, { 0, 0, 1, 2, 6, 64, }, { 2, 0, 1, 2, 6, 60, }, { 1, 0, 1, 2, 6, 72, }, @@ -40442,6 +40564,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 6, 60, }, { 8, 0, 1, 2, 6, 64, }, { 9, 0, 1, 2, 6, 60, }, + { 10, 0, 1, 2, 6, 60, }, + { 11, 0, 1, 2, 6, 60, }, { 0, 0, 1, 2, 7, 60, }, { 2, 0, 1, 2, 7, 60, }, { 1, 0, 1, 2, 7, 72, }, @@ -40452,6 +40576,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 7, 60, }, { 8, 0, 1, 2, 7, 60, }, { 9, 0, 1, 2, 7, 60, }, + { 10, 0, 1, 2, 7, 60, }, + { 11, 0, 1, 2, 7, 60, }, { 0, 0, 1, 2, 8, 52, }, { 2, 0, 1, 2, 8, 60, }, { 1, 0, 1, 2, 8, 72, }, @@ -40462,6 +40588,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 8, 60, }, { 8, 0, 1, 2, 8, 52, }, { 9, 0, 1, 2, 8, 60, }, + { 10, 0, 1, 2, 8, 60, }, + { 11, 0, 1, 2, 8, 60, }, { 0, 0, 1, 2, 9, 52, }, { 2, 0, 1, 2, 9, 60, }, { 1, 0, 1, 2, 9, 72, }, @@ -40471,7 +40599,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 1, 2, 9, 52, }, { 7, 0, 1, 2, 9, 60, }, { 8, 0, 1, 2, 9, 52, }, - { 9, 0, 1, 2, 9, 60, }, + { 9, 0, 1, 2, 9, 44, }, + { 10, 0, 1, 2, 9, 60, }, + { 11, 0, 1, 2, 9, 60, }, { 0, 0, 1, 2, 10, 40, }, { 2, 0, 1, 2, 10, 60, }, { 1, 0, 1, 2, 10, 72, }, @@ -40481,7 +40611,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 1, 2, 10, 40, }, { 7, 0, 1, 2, 10, 60, }, { 8, 0, 1, 2, 10, 40, }, - { 9, 0, 1, 2, 10, 60, }, + { 9, 0, 1, 2, 10, 44, }, + { 10, 0, 1, 2, 10, 60, }, + { 11, 0, 1, 2, 10, 60, }, { 0, 0, 1, 2, 11, 28, }, { 2, 0, 1, 2, 11, 60, }, { 1, 0, 1, 2, 11, 72, }, @@ -40491,7 +40623,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 1, 2, 11, 28, }, { 7, 0, 1, 2, 11, 60, }, { 8, 0, 1, 2, 11, 28, }, - { 9, 0, 1, 2, 11, 60, }, + { 9, 0, 1, 2, 11, 16, }, + { 10, 0, 1, 2, 11, 60, }, + { 11, 0, 1, 2, 11, 60, }, { 0, 0, 1, 2, 12, 127, }, { 2, 0, 1, 2, 12, 127, }, { 1, 0, 1, 2, 12, 127, }, @@ -40502,6 +40636,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 12, 127, }, { 8, 0, 1, 2, 12, 127, }, { 9, 0, 1, 2, 12, 127, }, + { 10, 0, 1, 2, 12, 127, }, + { 11, 0, 1, 2, 12, 127, }, { 0, 0, 1, 2, 13, 127, }, { 2, 0, 1, 2, 13, 127, }, { 1, 0, 1, 2, 13, 127, }, @@ -40512,6 +40648,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 13, 127, }, { 8, 0, 1, 2, 
13, 127, }, { 9, 0, 1, 2, 13, 127, }, + { 10, 0, 1, 2, 13, 127, }, + { 11, 0, 1, 2, 13, 127, }, { 0, 0, 1, 2, 14, 127, }, { 2, 0, 1, 2, 14, 127, }, { 1, 0, 1, 2, 14, 127, }, @@ -40522,6 +40660,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 2, 14, 127, }, { 8, 0, 1, 2, 14, 127, }, { 9, 0, 1, 2, 14, 127, }, + { 10, 0, 1, 2, 14, 127, }, + { 11, 0, 1, 2, 14, 127, }, { 0, 0, 1, 3, 1, 127, }, { 2, 0, 1, 3, 1, 127, }, { 1, 0, 1, 3, 1, 127, }, @@ -40532,6 +40672,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 1, 127, }, { 8, 0, 1, 3, 1, 127, }, { 9, 0, 1, 3, 1, 127, }, + { 10, 0, 1, 3, 1, 127, }, + { 11, 0, 1, 3, 1, 127, }, { 0, 0, 1, 3, 2, 127, }, { 2, 0, 1, 3, 2, 127, }, { 1, 0, 1, 3, 2, 127, }, @@ -40542,6 +40684,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 2, 127, }, { 8, 0, 1, 3, 2, 127, }, { 9, 0, 1, 3, 2, 127, }, + { 10, 0, 1, 3, 2, 127, }, + { 11, 0, 1, 3, 2, 127, }, { 0, 0, 1, 3, 3, 48, }, { 2, 0, 1, 3, 3, 36, }, { 1, 0, 1, 3, 3, 66, }, @@ -40552,6 +40696,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 3, 36, }, { 8, 0, 1, 3, 3, 48, }, { 9, 0, 1, 3, 3, 36, }, + { 10, 0, 1, 3, 3, 36, }, + { 11, 0, 1, 3, 3, 36, }, { 0, 0, 1, 3, 4, 48, }, { 2, 0, 1, 3, 4, 36, }, { 1, 0, 1, 3, 4, 66, }, @@ -40562,6 +40708,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 4, 36, }, { 8, 0, 1, 3, 4, 48, }, { 9, 0, 1, 3, 4, 36, }, + { 10, 0, 1, 3, 4, 36, }, + { 11, 0, 1, 3, 4, 36, }, { 0, 0, 1, 3, 5, 60, }, { 2, 0, 1, 3, 5, 36, }, { 1, 0, 1, 3, 5, 66, }, @@ -40572,6 +40720,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 5, 36, }, { 8, 0, 1, 3, 5, 60, }, { 9, 0, 1, 3, 5, 36, }, + { 10, 0, 1, 3, 5, 36, }, + { 11, 0, 1, 3, 5, 36, }, { 0, 0, 1, 3, 6, 64, }, { 2, 0, 1, 3, 6, 36, }, { 1, 0, 1, 3, 6, 66, }, @@ -40582,6 +40732,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 6, 36, }, { 8, 0, 1, 3, 6, 64, }, { 9, 0, 1, 3, 6, 36, }, + { 10, 0, 1, 3, 6, 36, }, + { 11, 0, 1, 3, 6, 36, }, { 0, 0, 1, 3, 7, 60, }, { 2, 0, 1, 3, 7, 36, }, { 1, 0, 1, 3, 7, 66, }, @@ -40592,6 +40744,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 7, 36, }, { 8, 0, 1, 3, 7, 60, }, { 9, 0, 1, 3, 7, 36, }, + { 10, 0, 1, 3, 7, 36, }, + { 11, 0, 1, 3, 7, 36, }, { 0, 0, 1, 3, 8, 52, }, { 2, 0, 1, 3, 8, 36, }, { 1, 0, 1, 3, 8, 66, }, @@ -40602,6 +40756,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 8, 36, }, { 8, 0, 1, 3, 8, 52, }, { 9, 0, 1, 3, 8, 36, }, + { 10, 0, 1, 3, 8, 36, }, + { 11, 0, 1, 3, 8, 36, }, { 0, 0, 1, 3, 9, 52, }, { 2, 0, 1, 3, 9, 36, }, { 1, 0, 1, 3, 9, 66, }, @@ -40612,6 +40768,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 9, 36, }, { 8, 0, 1, 3, 9, 52, }, { 9, 0, 1, 3, 9, 36, }, + { 10, 0, 1, 3, 9, 36, }, + { 11, 0, 1, 3, 9, 36, }, { 0, 0, 1, 3, 10, 40, }, { 2, 0, 1, 3, 10, 36, }, { 1, 0, 1, 3, 10, 66, }, @@ -40622,6 +40780,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 10, 36, }, { 8, 0, 1, 3, 10, 40, }, { 9, 0, 1, 3, 10, 36, }, + { 10, 0, 1, 3, 10, 36, }, + { 11, 0, 1, 3, 10, 36, }, { 0, 0, 1, 3, 11, 26, }, { 2, 0, 1, 3, 11, 36, }, { 1, 0, 1, 3, 11, 66, }, @@ -40631,7 +40791,9 @@ static const struct rtw_txpwr_lmt_cfg_pair 
rtw8822c_txpwr_lmt_type0[] = { { 6, 0, 1, 3, 11, 26, }, { 7, 0, 1, 3, 11, 36, }, { 8, 0, 1, 3, 11, 26, }, - { 9, 0, 1, 3, 11, 36, }, + { 9, 0, 1, 3, 11, 16, }, + { 10, 0, 1, 3, 11, 36, }, + { 11, 0, 1, 3, 11, 36, }, { 0, 0, 1, 3, 12, 127, }, { 2, 0, 1, 3, 12, 127, }, { 1, 0, 1, 3, 12, 127, }, @@ -40642,6 +40804,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 12, 127, }, { 8, 0, 1, 3, 12, 127, }, { 9, 0, 1, 3, 12, 127, }, + { 10, 0, 1, 3, 12, 127, }, + { 11, 0, 1, 3, 12, 127, }, { 0, 0, 1, 3, 13, 127, }, { 2, 0, 1, 3, 13, 127, }, { 1, 0, 1, 3, 13, 127, }, @@ -40652,6 +40816,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 13, 127, }, { 8, 0, 1, 3, 13, 127, }, { 9, 0, 1, 3, 13, 127, }, + { 10, 0, 1, 3, 13, 127, }, + { 11, 0, 1, 3, 13, 127, }, { 0, 0, 1, 3, 14, 127, }, { 2, 0, 1, 3, 14, 127, }, { 1, 0, 1, 3, 14, 127, }, @@ -40662,6 +40828,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 0, 1, 3, 14, 127, }, { 8, 0, 1, 3, 14, 127, }, { 9, 0, 1, 3, 14, 127, }, + { 10, 0, 1, 3, 14, 127, }, + { 11, 0, 1, 3, 14, 127, }, { 0, 1, 0, 1, 36, 74, }, { 2, 1, 0, 1, 36, 62, }, { 1, 1, 0, 1, 36, 60, }, @@ -40672,6 +40840,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 36, 54, }, { 8, 1, 0, 1, 36, 62, }, { 9, 1, 0, 1, 36, 62, }, + { 10, 1, 0, 1, 36, 62, }, + { 11, 1, 0, 1, 36, 62, }, { 0, 1, 0, 1, 40, 76, }, { 2, 1, 0, 1, 40, 62, }, { 1, 1, 0, 1, 40, 62, }, @@ -40682,6 +40852,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 40, 54, }, { 8, 1, 0, 1, 40, 62, }, { 9, 1, 0, 1, 40, 62, }, + { 10, 1, 0, 1, 40, 62, }, + { 11, 1, 0, 1, 40, 62, }, { 0, 1, 0, 1, 44, 76, }, { 2, 1, 0, 1, 44, 62, }, { 1, 1, 0, 1, 44, 62, }, @@ -40692,6 +40864,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 44, 54, }, { 8, 1, 0, 1, 44, 62, }, { 9, 1, 0, 1, 44, 62, }, + { 10, 1, 0, 1, 44, 62, }, + { 11, 1, 0, 1, 44, 62, }, { 0, 1, 0, 1, 48, 76, }, { 2, 1, 0, 1, 48, 62, }, { 1, 1, 0, 1, 48, 62, }, @@ -40702,6 +40876,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 48, 54, }, { 8, 1, 0, 1, 48, 62, }, { 9, 1, 0, 1, 48, 62, }, + { 10, 1, 0, 1, 48, 62, }, + { 11, 1, 0, 1, 48, 62, }, { 0, 1, 0, 1, 52, 76, }, { 2, 1, 0, 1, 52, 62, }, { 1, 1, 0, 1, 52, 62, }, @@ -40712,6 +40888,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 52, 54, }, { 8, 1, 0, 1, 52, 76, }, { 9, 1, 0, 1, 52, 62, }, + { 10, 1, 0, 1, 52, 62, }, + { 11, 1, 0, 1, 52, 62, }, { 0, 1, 0, 1, 56, 76, }, { 2, 1, 0, 1, 56, 62, }, { 1, 1, 0, 1, 56, 62, }, @@ -40722,6 +40900,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 56, 54, }, { 8, 1, 0, 1, 56, 76, }, { 9, 1, 0, 1, 56, 62, }, + { 10, 1, 0, 1, 56, 62, }, + { 11, 1, 0, 1, 56, 62, }, { 0, 1, 0, 1, 60, 76, }, { 2, 1, 0, 1, 60, 62, }, { 1, 1, 0, 1, 60, 62, }, @@ -40732,6 +40912,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 60, 54, }, { 8, 1, 0, 1, 60, 76, }, { 9, 1, 0, 1, 60, 62, }, + { 10, 1, 0, 1, 60, 62, }, + { 11, 1, 0, 1, 60, 62, }, { 0, 1, 0, 1, 64, 74, }, { 2, 1, 0, 1, 64, 62, }, { 1, 1, 0, 1, 64, 60, }, @@ -40742,6 +40924,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 64, 54, }, { 8, 1, 0, 1, 64, 74, }, { 9, 1, 0, 1, 64, 62, }, + { 10, 1, 0, 1, 64, 62, 
}, + { 11, 1, 0, 1, 64, 62, }, { 0, 1, 0, 1, 100, 72, }, { 2, 1, 0, 1, 100, 62, }, { 1, 1, 0, 1, 100, 76, }, @@ -40752,6 +40936,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 100, 54, }, { 8, 1, 0, 1, 100, 72, }, { 9, 1, 0, 1, 100, 127, }, + { 10, 1, 0, 1, 100, 54, }, + { 11, 1, 0, 1, 100, 62, }, { 0, 1, 0, 1, 104, 76, }, { 2, 1, 0, 1, 104, 62, }, { 1, 1, 0, 1, 104, 76, }, @@ -40762,6 +40948,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 104, 54, }, { 8, 1, 0, 1, 104, 76, }, { 9, 1, 0, 1, 104, 127, }, + { 10, 1, 0, 1, 104, 54, }, + { 11, 1, 0, 1, 104, 62, }, { 0, 1, 0, 1, 108, 76, }, { 2, 1, 0, 1, 108, 62, }, { 1, 1, 0, 1, 108, 76, }, @@ -40772,6 +40960,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 108, 54, }, { 8, 1, 0, 1, 108, 76, }, { 9, 1, 0, 1, 108, 127, }, + { 10, 1, 0, 1, 108, 54, }, + { 11, 1, 0, 1, 108, 62, }, { 0, 1, 0, 1, 112, 76, }, { 2, 1, 0, 1, 112, 62, }, { 1, 1, 0, 1, 112, 76, }, @@ -40782,6 +40972,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 112, 54, }, { 8, 1, 0, 1, 112, 76, }, { 9, 1, 0, 1, 112, 127, }, + { 10, 1, 0, 1, 112, 54, }, + { 11, 1, 0, 1, 112, 62, }, { 0, 1, 0, 1, 116, 76, }, { 2, 1, 0, 1, 116, 62, }, { 1, 1, 0, 1, 116, 76, }, @@ -40792,6 +40984,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 116, 54, }, { 8, 1, 0, 1, 116, 76, }, { 9, 1, 0, 1, 116, 127, }, + { 10, 1, 0, 1, 116, 54, }, + { 11, 1, 0, 1, 116, 62, }, { 0, 1, 0, 1, 120, 76, }, { 2, 1, 0, 1, 120, 62, }, { 1, 1, 0, 1, 120, 76, }, @@ -40802,6 +40996,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 120, 54, }, { 8, 1, 0, 1, 120, 76, }, { 9, 1, 0, 1, 120, 127, }, + { 10, 1, 0, 1, 120, 54, }, + { 11, 1, 0, 1, 120, 62, }, { 0, 1, 0, 1, 124, 76, }, { 2, 1, 0, 1, 124, 62, }, { 1, 1, 0, 1, 124, 76, }, @@ -40812,6 +41008,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 124, 54, }, { 8, 1, 0, 1, 124, 76, }, { 9, 1, 0, 1, 124, 127, }, + { 10, 1, 0, 1, 124, 54, }, + { 11, 1, 0, 1, 124, 62, }, { 0, 1, 0, 1, 128, 76, }, { 2, 1, 0, 1, 128, 62, }, { 1, 1, 0, 1, 128, 76, }, @@ -40822,6 +41020,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 128, 54, }, { 8, 1, 0, 1, 128, 76, }, { 9, 1, 0, 1, 128, 127, }, + { 10, 1, 0, 1, 128, 54, }, + { 11, 1, 0, 1, 128, 62, }, { 0, 1, 0, 1, 132, 76, }, { 2, 1, 0, 1, 132, 62, }, { 1, 1, 0, 1, 132, 76, }, @@ -40832,6 +41032,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 132, 54, }, { 8, 1, 0, 1, 132, 76, }, { 9, 1, 0, 1, 132, 127, }, + { 10, 1, 0, 1, 132, 54, }, + { 11, 1, 0, 1, 132, 62, }, { 0, 1, 0, 1, 136, 76, }, { 2, 1, 0, 1, 136, 62, }, { 1, 1, 0, 1, 136, 76, }, @@ -40842,6 +41044,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 136, 54, }, { 8, 1, 0, 1, 136, 76, }, { 9, 1, 0, 1, 136, 127, }, + { 10, 1, 0, 1, 136, 54, }, + { 11, 1, 0, 1, 136, 62, }, { 0, 1, 0, 1, 140, 72, }, { 2, 1, 0, 1, 140, 62, }, { 1, 1, 0, 1, 140, 76, }, @@ -40852,6 +41056,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 140, 54, }, { 8, 1, 0, 1, 140, 72, }, { 9, 1, 0, 1, 140, 127, }, + { 10, 1, 0, 1, 140, 54, }, + { 11, 1, 0, 1, 140, 62, }, { 0, 1, 0, 1, 144, 76, }, { 2, 1, 0, 1, 144, 127, }, { 1, 1, 0, 1, 144, 127, }, 
@@ -40862,8 +41068,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 1, 144, 127, }, { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, + { 10, 1, 0, 1, 144, 127, }, + { 11, 1, 0, 1, 144, 76, }, { 0, 1, 0, 1, 149, 76, }, - { 2, 1, 0, 1, 149, 54, }, + { 2, 1, 0, 1, 149, 28, }, { 1, 1, 0, 1, 149, 127, }, { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, @@ -40871,9 +41079,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 149, 76, }, { 7, 1, 0, 1, 149, 54, }, { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, 54, }, + { 9, 1, 0, 1, 149, 28, }, + { 10, 1, 0, 1, 149, 28, }, + { 11, 1, 0, 1, 149, 58, }, { 0, 1, 0, 1, 153, 76, }, - { 2, 1, 0, 1, 153, 54, }, + { 2, 1, 0, 1, 153, 28, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, @@ -40881,9 +41091,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 153, 76, }, { 7, 1, 0, 1, 153, 54, }, { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, 54, }, + { 9, 1, 0, 1, 153, 28, }, + { 10, 1, 0, 1, 153, 28, }, + { 11, 1, 0, 1, 153, 58, }, { 0, 1, 0, 1, 157, 76, }, - { 2, 1, 0, 1, 157, 54, }, + { 2, 1, 0, 1, 157, 28, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, @@ -40891,9 +41103,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 157, 76, }, { 7, 1, 0, 1, 157, 54, }, { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, 54, }, + { 9, 1, 0, 1, 157, 28, }, + { 10, 1, 0, 1, 157, 28, }, + { 11, 1, 0, 1, 157, 58, }, { 0, 1, 0, 1, 161, 76, }, - { 2, 1, 0, 1, 161, 54, }, + { 2, 1, 0, 1, 161, 28, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, @@ -40901,9 +41115,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 161, 76, }, { 7, 1, 0, 1, 161, 54, }, { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, 54, }, + { 9, 1, 0, 1, 161, 28, }, + { 10, 1, 0, 1, 161, 28, }, + { 11, 1, 0, 1, 161, 58, }, { 0, 1, 0, 1, 165, 76, }, - { 2, 1, 0, 1, 165, 54, }, + { 2, 1, 0, 1, 165, 28, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, @@ -40911,7 +41127,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 1, 165, 76, }, { 7, 1, 0, 1, 165, 54, }, { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, 54, }, + { 9, 1, 0, 1, 165, 28, }, + { 10, 1, 0, 1, 165, 28, }, + { 11, 1, 0, 1, 165, 58, }, { 0, 1, 0, 2, 36, 72, }, { 2, 1, 0, 2, 36, 62, }, { 1, 1, 0, 2, 36, 62, }, @@ -40922,6 +41140,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 36, 54, }, { 8, 1, 0, 2, 36, 62, }, { 9, 1, 0, 2, 36, 62, }, + { 10, 1, 0, 2, 36, 62, }, + { 11, 1, 0, 2, 36, 62, }, { 0, 1, 0, 2, 40, 76, }, { 2, 1, 0, 2, 40, 62, }, { 1, 1, 0, 2, 40, 62, }, @@ -40932,6 +41152,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 40, 54, }, { 8, 1, 0, 2, 40, 62, }, { 9, 1, 0, 2, 40, 62, }, + { 10, 1, 0, 2, 40, 62, }, + { 11, 1, 0, 2, 40, 62, }, { 0, 1, 0, 2, 44, 76, }, { 2, 1, 0, 2, 44, 62, }, { 1, 1, 0, 2, 44, 62, }, @@ -40942,6 +41164,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 44, 54, }, { 8, 1, 0, 2, 44, 62, }, { 9, 1, 0, 2, 44, 62, }, + { 10, 1, 0, 2, 44, 62, }, + { 11, 1, 0, 2, 44, 62, }, { 0, 1, 0, 2, 48, 76, }, { 2, 1, 0, 2, 48, 62, }, { 1, 1, 0, 2, 48, 62, }, @@ -40952,6 +41176,8 @@ static const struct rtw_txpwr_lmt_cfg_pair 
rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 48, 54, }, { 8, 1, 0, 2, 48, 62, }, { 9, 1, 0, 2, 48, 62, }, + { 10, 1, 0, 2, 48, 62, }, + { 11, 1, 0, 2, 48, 62, }, { 0, 1, 0, 2, 52, 76, }, { 2, 1, 0, 2, 52, 62, }, { 1, 1, 0, 2, 52, 62, }, @@ -40962,6 +41188,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 52, 54, }, { 8, 1, 0, 2, 52, 76, }, { 9, 1, 0, 2, 52, 62, }, + { 10, 1, 0, 2, 52, 62, }, + { 11, 1, 0, 2, 52, 62, }, { 0, 1, 0, 2, 56, 76, }, { 2, 1, 0, 2, 56, 62, }, { 1, 1, 0, 2, 56, 62, }, @@ -40972,6 +41200,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 56, 54, }, { 8, 1, 0, 2, 56, 76, }, { 9, 1, 0, 2, 56, 62, }, + { 10, 1, 0, 2, 56, 62, }, + { 11, 1, 0, 2, 56, 62, }, { 0, 1, 0, 2, 60, 76, }, { 2, 1, 0, 2, 60, 62, }, { 1, 1, 0, 2, 60, 62, }, @@ -40982,6 +41212,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 60, 54, }, { 8, 1, 0, 2, 60, 76, }, { 9, 1, 0, 2, 60, 62, }, + { 10, 1, 0, 2, 60, 62, }, + { 11, 1, 0, 2, 60, 62, }, { 0, 1, 0, 2, 64, 74, }, { 2, 1, 0, 2, 64, 62, }, { 1, 1, 0, 2, 64, 60, }, @@ -40992,6 +41224,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 64, 54, }, { 8, 1, 0, 2, 64, 74, }, { 9, 1, 0, 2, 64, 62, }, + { 10, 1, 0, 2, 64, 62, }, + { 11, 1, 0, 2, 64, 62, }, { 0, 1, 0, 2, 100, 70, }, { 2, 1, 0, 2, 100, 62, }, { 1, 1, 0, 2, 100, 76, }, @@ -41002,6 +41236,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 100, 54, }, { 8, 1, 0, 2, 100, 70, }, { 9, 1, 0, 2, 100, 127, }, + { 10, 1, 0, 2, 100, 54, }, + { 11, 1, 0, 2, 100, 62, }, { 0, 1, 0, 2, 104, 76, }, { 2, 1, 0, 2, 104, 62, }, { 1, 1, 0, 2, 104, 76, }, @@ -41012,6 +41248,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 104, 54, }, { 8, 1, 0, 2, 104, 76, }, { 9, 1, 0, 2, 104, 127, }, + { 10, 1, 0, 2, 104, 54, }, + { 11, 1, 0, 2, 104, 62, }, { 0, 1, 0, 2, 108, 76, }, { 2, 1, 0, 2, 108, 62, }, { 1, 1, 0, 2, 108, 76, }, @@ -41022,6 +41260,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 108, 54, }, { 8, 1, 0, 2, 108, 76, }, { 9, 1, 0, 2, 108, 127, }, + { 10, 1, 0, 2, 108, 54, }, + { 11, 1, 0, 2, 108, 62, }, { 0, 1, 0, 2, 112, 76, }, { 2, 1, 0, 2, 112, 62, }, { 1, 1, 0, 2, 112, 76, }, @@ -41032,6 +41272,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 112, 54, }, { 8, 1, 0, 2, 112, 76, }, { 9, 1, 0, 2, 112, 127, }, + { 10, 1, 0, 2, 112, 54, }, + { 11, 1, 0, 2, 112, 62, }, { 0, 1, 0, 2, 116, 76, }, { 2, 1, 0, 2, 116, 62, }, { 1, 1, 0, 2, 116, 76, }, @@ -41042,6 +41284,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 116, 54, }, { 8, 1, 0, 2, 116, 76, }, { 9, 1, 0, 2, 116, 127, }, + { 10, 1, 0, 2, 116, 54, }, + { 11, 1, 0, 2, 116, 62, }, { 0, 1, 0, 2, 120, 76, }, { 2, 1, 0, 2, 120, 62, }, { 1, 1, 0, 2, 120, 76, }, @@ -41052,6 +41296,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 120, 54, }, { 8, 1, 0, 2, 120, 76, }, { 9, 1, 0, 2, 120, 127, }, + { 10, 1, 0, 2, 120, 54, }, + { 11, 1, 0, 2, 120, 62, }, { 0, 1, 0, 2, 124, 76, }, { 2, 1, 0, 2, 124, 62, }, { 1, 1, 0, 2, 124, 76, }, @@ -41062,6 +41308,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 124, 54, }, { 8, 1, 0, 2, 124, 76, }, { 9, 1, 0, 2, 124, 127, }, + { 10, 1, 0, 2, 124, 54, }, + { 11, 1, 0, 
2, 124, 62, }, { 0, 1, 0, 2, 128, 76, }, { 2, 1, 0, 2, 128, 62, }, { 1, 1, 0, 2, 128, 76, }, @@ -41072,6 +41320,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 128, 54, }, { 8, 1, 0, 2, 128, 76, }, { 9, 1, 0, 2, 128, 127, }, + { 10, 1, 0, 2, 128, 54, }, + { 11, 1, 0, 2, 128, 62, }, { 0, 1, 0, 2, 132, 76, }, { 2, 1, 0, 2, 132, 62, }, { 1, 1, 0, 2, 132, 76, }, @@ -41082,6 +41332,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 132, 54, }, { 8, 1, 0, 2, 132, 76, }, { 9, 1, 0, 2, 132, 127, }, + { 10, 1, 0, 2, 132, 54, }, + { 11, 1, 0, 2, 132, 62, }, { 0, 1, 0, 2, 136, 76, }, { 2, 1, 0, 2, 136, 62, }, { 1, 1, 0, 2, 136, 76, }, @@ -41092,6 +41344,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 136, 54, }, { 8, 1, 0, 2, 136, 76, }, { 9, 1, 0, 2, 136, 127, }, + { 10, 1, 0, 2, 136, 54, }, + { 11, 1, 0, 2, 136, 62, }, { 0, 1, 0, 2, 140, 70, }, { 2, 1, 0, 2, 140, 62, }, { 1, 1, 0, 2, 140, 76, }, @@ -41102,6 +41356,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 140, 54, }, { 8, 1, 0, 2, 140, 70, }, { 9, 1, 0, 2, 140, 127, }, + { 10, 1, 0, 2, 140, 54, }, + { 11, 1, 0, 2, 140, 62, }, { 0, 1, 0, 2, 144, 76, }, { 2, 1, 0, 2, 144, 127, }, { 1, 1, 0, 2, 144, 127, }, @@ -41112,8 +41368,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 2, 144, 127, }, { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, + { 10, 1, 0, 2, 144, 127, }, + { 11, 1, 0, 2, 144, 76, }, { 0, 1, 0, 2, 149, 76, }, - { 2, 1, 0, 2, 149, 54, }, + { 2, 1, 0, 2, 149, 28, }, { 1, 1, 0, 2, 149, 127, }, { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, @@ -41121,9 +41379,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 149, 76, }, { 7, 1, 0, 2, 149, 54, }, { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, 54, }, + { 9, 1, 0, 2, 149, 28, }, + { 10, 1, 0, 2, 149, 28, }, + { 11, 1, 0, 2, 149, 60, }, { 0, 1, 0, 2, 153, 76, }, - { 2, 1, 0, 2, 153, 54, }, + { 2, 1, 0, 2, 153, 28, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, @@ -41131,9 +41391,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 153, 76, }, { 7, 1, 0, 2, 153, 54, }, { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, 54, }, + { 9, 1, 0, 2, 153, 28, }, + { 10, 1, 0, 2, 153, 28, }, + { 11, 1, 0, 2, 153, 60, }, { 0, 1, 0, 2, 157, 76, }, - { 2, 1, 0, 2, 157, 54, }, + { 2, 1, 0, 2, 157, 28, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, @@ -41141,9 +41403,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 157, 76, }, { 7, 1, 0, 2, 157, 54, }, { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, 54, }, + { 9, 1, 0, 2, 157, 28, }, + { 10, 1, 0, 2, 157, 28, }, + { 11, 1, 0, 2, 157, 60, }, { 0, 1, 0, 2, 161, 76, }, - { 2, 1, 0, 2, 161, 54, }, + { 2, 1, 0, 2, 161, 28, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, @@ -41151,9 +41415,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 161, 76, }, { 7, 1, 0, 2, 161, 54, }, { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, 54, }, + { 9, 1, 0, 2, 161, 28, }, + { 10, 1, 0, 2, 161, 28, }, + { 11, 1, 0, 2, 161, 60, }, { 0, 1, 0, 2, 165, 76, }, - { 2, 1, 0, 2, 165, 54, }, + { 2, 1, 0, 2, 165, 28, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, 
@@ -41161,7 +41427,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 2, 165, 76, }, { 7, 1, 0, 2, 165, 54, }, { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, 54, }, + { 9, 1, 0, 2, 165, 28, }, + { 10, 1, 0, 2, 165, 28, }, + { 11, 1, 0, 2, 165, 60, }, { 0, 1, 0, 3, 36, 68, }, { 2, 1, 0, 3, 36, 38, }, { 1, 1, 0, 3, 36, 50, }, @@ -41172,6 +41440,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 36, 30, }, { 8, 1, 0, 3, 36, 50, }, { 9, 1, 0, 3, 36, 38, }, + { 10, 1, 0, 3, 36, 38, }, + { 11, 1, 0, 3, 36, 38, }, { 0, 1, 0, 3, 40, 68, }, { 2, 1, 0, 3, 40, 38, }, { 1, 1, 0, 3, 40, 50, }, @@ -41182,6 +41452,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 40, 30, }, { 8, 1, 0, 3, 40, 50, }, { 9, 1, 0, 3, 40, 38, }, + { 10, 1, 0, 3, 40, 38, }, + { 11, 1, 0, 3, 40, 38, }, { 0, 1, 0, 3, 44, 68, }, { 2, 1, 0, 3, 44, 38, }, { 1, 1, 0, 3, 44, 50, }, @@ -41192,6 +41464,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 44, 30, }, { 8, 1, 0, 3, 44, 50, }, { 9, 1, 0, 3, 44, 38, }, + { 10, 1, 0, 3, 44, 38, }, + { 11, 1, 0, 3, 44, 38, }, { 0, 1, 0, 3, 48, 68, }, { 2, 1, 0, 3, 48, 38, }, { 1, 1, 0, 3, 48, 50, }, @@ -41202,6 +41476,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 48, 30, }, { 8, 1, 0, 3, 48, 50, }, { 9, 1, 0, 3, 48, 38, }, + { 10, 1, 0, 3, 48, 38, }, + { 11, 1, 0, 3, 48, 38, }, { 0, 1, 0, 3, 52, 68, }, { 2, 1, 0, 3, 52, 38, }, { 1, 1, 0, 3, 52, 50, }, @@ -41212,6 +41488,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 52, 30, }, { 8, 1, 0, 3, 52, 68, }, { 9, 1, 0, 3, 52, 38, }, + { 10, 1, 0, 3, 52, 38, }, + { 11, 1, 0, 3, 52, 38, }, { 0, 1, 0, 3, 56, 68, }, { 2, 1, 0, 3, 56, 38, }, { 1, 1, 0, 3, 56, 50, }, @@ -41222,6 +41500,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 56, 30, }, { 8, 1, 0, 3, 56, 68, }, { 9, 1, 0, 3, 56, 38, }, + { 10, 1, 0, 3, 56, 38, }, + { 11, 1, 0, 3, 56, 38, }, { 0, 1, 0, 3, 60, 66, }, { 2, 1, 0, 3, 60, 38, }, { 1, 1, 0, 3, 60, 50, }, @@ -41232,6 +41512,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 60, 30, }, { 8, 1, 0, 3, 60, 66, }, { 9, 1, 0, 3, 60, 38, }, + { 10, 1, 0, 3, 60, 38, }, + { 11, 1, 0, 3, 60, 38, }, { 0, 1, 0, 3, 64, 68, }, { 2, 1, 0, 3, 64, 38, }, { 1, 1, 0, 3, 64, 50, }, @@ -41242,6 +41524,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 64, 30, }, { 8, 1, 0, 3, 64, 68, }, { 9, 1, 0, 3, 64, 38, }, + { 10, 1, 0, 3, 64, 38, }, + { 11, 1, 0, 3, 64, 38, }, { 0, 1, 0, 3, 100, 60, }, { 2, 1, 0, 3, 100, 38, }, { 1, 1, 0, 3, 100, 70, }, @@ -41252,6 +41536,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 100, 30, }, { 8, 1, 0, 3, 100, 60, }, { 9, 1, 0, 3, 100, 127, }, + { 10, 1, 0, 3, 100, 30, }, + { 11, 1, 0, 3, 100, 38, }, { 0, 1, 0, 3, 104, 68, }, { 2, 1, 0, 3, 104, 38, }, { 1, 1, 0, 3, 104, 70, }, @@ -41262,6 +41548,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 104, 30, }, { 8, 1, 0, 3, 104, 68, }, { 9, 1, 0, 3, 104, 127, }, + { 10, 1, 0, 3, 104, 30, }, + { 11, 1, 0, 3, 104, 38, }, { 0, 1, 0, 3, 108, 68, }, { 2, 1, 0, 3, 108, 38, }, { 1, 1, 0, 3, 108, 70, }, @@ -41272,6 +41560,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 108, 30, }, { 8, 
1, 0, 3, 108, 68, }, { 9, 1, 0, 3, 108, 127, }, + { 10, 1, 0, 3, 108, 30, }, + { 11, 1, 0, 3, 108, 38, }, { 0, 1, 0, 3, 112, 68, }, { 2, 1, 0, 3, 112, 38, }, { 1, 1, 0, 3, 112, 70, }, @@ -41282,6 +41572,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 112, 30, }, { 8, 1, 0, 3, 112, 68, }, { 9, 1, 0, 3, 112, 127, }, + { 10, 1, 0, 3, 112, 30, }, + { 11, 1, 0, 3, 112, 38, }, { 0, 1, 0, 3, 116, 68, }, { 2, 1, 0, 3, 116, 38, }, { 1, 1, 0, 3, 116, 70, }, @@ -41292,6 +41584,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 116, 30, }, { 8, 1, 0, 3, 116, 68, }, { 9, 1, 0, 3, 116, 127, }, + { 10, 1, 0, 3, 116, 30, }, + { 11, 1, 0, 3, 116, 38, }, { 0, 1, 0, 3, 120, 68, }, { 2, 1, 0, 3, 120, 38, }, { 1, 1, 0, 3, 120, 70, }, @@ -41302,6 +41596,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 120, 30, }, { 8, 1, 0, 3, 120, 68, }, { 9, 1, 0, 3, 120, 127, }, + { 10, 1, 0, 3, 120, 30, }, + { 11, 1, 0, 3, 120, 38, }, { 0, 1, 0, 3, 124, 68, }, { 2, 1, 0, 3, 124, 38, }, { 1, 1, 0, 3, 124, 70, }, @@ -41312,6 +41608,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 124, 30, }, { 8, 1, 0, 3, 124, 68, }, { 9, 1, 0, 3, 124, 127, }, + { 10, 1, 0, 3, 124, 30, }, + { 11, 1, 0, 3, 124, 38, }, { 0, 1, 0, 3, 128, 68, }, { 2, 1, 0, 3, 128, 38, }, { 1, 1, 0, 3, 128, 70, }, @@ -41322,6 +41620,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 128, 30, }, { 8, 1, 0, 3, 128, 68, }, { 9, 1, 0, 3, 128, 127, }, + { 10, 1, 0, 3, 128, 30, }, + { 11, 1, 0, 3, 128, 38, }, { 0, 1, 0, 3, 132, 68, }, { 2, 1, 0, 3, 132, 38, }, { 1, 1, 0, 3, 132, 70, }, @@ -41332,6 +41632,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 132, 30, }, { 8, 1, 0, 3, 132, 68, }, { 9, 1, 0, 3, 132, 127, }, + { 10, 1, 0, 3, 132, 30, }, + { 11, 1, 0, 3, 132, 38, }, { 0, 1, 0, 3, 136, 68, }, { 2, 1, 0, 3, 136, 38, }, { 1, 1, 0, 3, 136, 70, }, @@ -41342,6 +41644,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 136, 30, }, { 8, 1, 0, 3, 136, 68, }, { 9, 1, 0, 3, 136, 127, }, + { 10, 1, 0, 3, 136, 30, }, + { 11, 1, 0, 3, 136, 38, }, { 0, 1, 0, 3, 140, 60, }, { 2, 1, 0, 3, 140, 38, }, { 1, 1, 0, 3, 140, 70, }, @@ -41352,6 +41656,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 140, 30, }, { 8, 1, 0, 3, 140, 60, }, { 9, 1, 0, 3, 140, 127, }, + { 10, 1, 0, 3, 140, 30, }, + { 11, 1, 0, 3, 140, 38, }, { 0, 1, 0, 3, 144, 68, }, { 2, 1, 0, 3, 144, 127, }, { 1, 1, 0, 3, 144, 127, }, @@ -41362,8 +41668,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 0, 3, 144, 127, }, { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, + { 10, 1, 0, 3, 144, 127, }, + { 11, 1, 0, 3, 144, 60, }, { 0, 1, 0, 3, 149, 76, }, - { 2, 1, 0, 3, 149, 30, }, + { 2, 1, 0, 3, 149, 4, }, { 1, 1, 0, 3, 149, 127, }, { 3, 1, 0, 3, 149, 76, }, { 4, 1, 0, 3, 149, 60, }, @@ -41371,9 +41679,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 149, 76, }, { 7, 1, 0, 3, 149, 30, }, { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, 30, }, + { 9, 1, 0, 3, 149, 4, }, + { 10, 1, 0, 3, 149, 4, }, + { 11, 1, 0, 3, 149, 36, }, { 0, 1, 0, 3, 153, 76, }, - { 2, 1, 0, 3, 153, 30, }, + { 2, 1, 0, 3, 153, 4, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, { 4, 1, 0, 3, 153, 60, }, @@ -41381,9 
+41691,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 153, 76, }, { 7, 1, 0, 3, 153, 30, }, { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, 30, }, + { 9, 1, 0, 3, 153, 4, }, + { 10, 1, 0, 3, 153, 4, }, + { 11, 1, 0, 3, 153, 36, }, { 0, 1, 0, 3, 157, 76, }, - { 2, 1, 0, 3, 157, 30, }, + { 2, 1, 0, 3, 157, 4, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, { 4, 1, 0, 3, 157, 60, }, @@ -41391,9 +41703,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 157, 76, }, { 7, 1, 0, 3, 157, 30, }, { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, 30, }, + { 9, 1, 0, 3, 157, 4, }, + { 10, 1, 0, 3, 157, 4, }, + { 11, 1, 0, 3, 157, 36, }, { 0, 1, 0, 3, 161, 76, }, - { 2, 1, 0, 3, 161, 30, }, + { 2, 1, 0, 3, 161, 4, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, { 4, 1, 0, 3, 161, 60, }, @@ -41401,9 +41715,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 161, 76, }, { 7, 1, 0, 3, 161, 30, }, { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, 30, }, + { 9, 1, 0, 3, 161, 4, }, + { 10, 1, 0, 3, 161, 4, }, + { 11, 1, 0, 3, 161, 36, }, { 0, 1, 0, 3, 165, 76, }, - { 2, 1, 0, 3, 165, 30, }, + { 2, 1, 0, 3, 165, 4, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, { 4, 1, 0, 3, 165, 60, }, @@ -41411,7 +41727,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 0, 3, 165, 76, }, { 7, 1, 0, 3, 165, 30, }, { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, 30, }, + { 9, 1, 0, 3, 165, 4, }, + { 10, 1, 0, 3, 165, 4, }, + { 11, 1, 0, 3, 165, 36, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, { 1, 1, 1, 2, 38, 62, }, @@ -41422,6 +41740,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 38, 54, }, { 8, 1, 1, 2, 38, 62, }, { 9, 1, 1, 2, 38, 64, }, + { 10, 1, 1, 2, 38, 64, }, + { 11, 1, 1, 2, 38, 64, }, { 0, 1, 1, 2, 46, 72, }, { 2, 1, 1, 2, 46, 64, }, { 1, 1, 1, 2, 46, 62, }, @@ -41432,6 +41752,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 46, 54, }, { 8, 1, 1, 2, 46, 62, }, { 9, 1, 1, 2, 46, 64, }, + { 10, 1, 1, 2, 46, 64, }, + { 11, 1, 1, 2, 46, 64, }, { 0, 1, 1, 2, 54, 72, }, { 2, 1, 1, 2, 54, 64, }, { 1, 1, 1, 2, 54, 62, }, @@ -41442,6 +41764,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 54, 54, }, { 8, 1, 1, 2, 54, 72, }, { 9, 1, 1, 2, 54, 64, }, + { 10, 1, 1, 2, 54, 64, }, + { 11, 1, 1, 2, 54, 64, }, { 0, 1, 1, 2, 62, 64, }, { 2, 1, 1, 2, 62, 64, }, { 1, 1, 1, 2, 62, 62, }, @@ -41452,6 +41776,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 62, 54, }, { 8, 1, 1, 2, 62, 64, }, { 9, 1, 1, 2, 62, 64, }, + { 10, 1, 1, 2, 62, 64, }, + { 11, 1, 1, 2, 62, 64, }, { 0, 1, 1, 2, 102, 58, }, { 2, 1, 1, 2, 102, 64, }, { 1, 1, 1, 2, 102, 72, }, @@ -41462,6 +41788,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 102, 54, }, { 8, 1, 1, 2, 102, 58, }, { 9, 1, 1, 2, 102, 127, }, + { 10, 1, 1, 2, 102, 54, }, + { 11, 1, 1, 2, 102, 64, }, { 0, 1, 1, 2, 110, 72, }, { 2, 1, 1, 2, 110, 64, }, { 1, 1, 1, 2, 110, 72, }, @@ -41472,6 +41800,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 110, 54, }, { 8, 1, 1, 2, 110, 72, }, { 9, 1, 1, 2, 110, 127, }, + { 10, 1, 1, 2, 110, 54, }, + { 11, 1, 1, 2, 110, 64, }, { 0, 1, 1, 2, 118, 72, }, { 2, 1, 1, 2, 118, 64, }, { 1, 1, 1, 2, 118, 72, }, @@ -41482,6 
+41812,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 118, 54, }, { 8, 1, 1, 2, 118, 72, }, { 9, 1, 1, 2, 118, 127, }, + { 10, 1, 1, 2, 118, 54, }, + { 11, 1, 1, 2, 118, 64, }, { 0, 1, 1, 2, 126, 72, }, { 2, 1, 1, 2, 126, 64, }, { 1, 1, 1, 2, 126, 72, }, @@ -41492,6 +41824,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 126, 54, }, { 8, 1, 1, 2, 126, 72, }, { 9, 1, 1, 2, 126, 127, }, + { 10, 1, 1, 2, 126, 54, }, + { 11, 1, 1, 2, 126, 64, }, { 0, 1, 1, 2, 134, 72, }, { 2, 1, 1, 2, 134, 64, }, { 1, 1, 1, 2, 134, 72, }, @@ -41502,6 +41836,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 134, 54, }, { 8, 1, 1, 2, 134, 72, }, { 9, 1, 1, 2, 134, 127, }, + { 10, 1, 1, 2, 134, 54, }, + { 11, 1, 1, 2, 134, 64, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, { 1, 1, 1, 2, 142, 127, }, @@ -41512,8 +41848,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 142, 127, }, { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, + { 10, 1, 1, 2, 142, 127, }, + { 11, 1, 1, 2, 142, 72, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, 54, }, + { 2, 1, 1, 2, 151, 28, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -41521,9 +41859,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, 54, }, + { 9, 1, 1, 2, 151, 28, }, + { 10, 1, 1, 2, 151, 28, }, + { 11, 1, 1, 2, 151, 64, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, 54, }, + { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -41531,7 +41871,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, 54, }, + { 9, 1, 1, 2, 159, 28, }, + { 10, 1, 1, 2, 159, 28, }, + { 11, 1, 1, 2, 159, 64, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, @@ -41542,6 +41884,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 38, 30, }, { 8, 1, 1, 3, 38, 50, }, { 9, 1, 1, 3, 38, 40, }, + { 10, 1, 1, 3, 38, 40, }, + { 11, 1, 1, 3, 38, 40, }, { 0, 1, 1, 3, 46, 68, }, { 2, 1, 1, 3, 46, 40, }, { 1, 1, 1, 3, 46, 50, }, @@ -41552,6 +41896,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 46, 30, }, { 8, 1, 1, 3, 46, 50, }, { 9, 1, 1, 3, 46, 40, }, + { 10, 1, 1, 3, 46, 40, }, + { 11, 1, 1, 3, 46, 40, }, { 0, 1, 1, 3, 54, 68, }, { 2, 1, 1, 3, 54, 40, }, { 1, 1, 1, 3, 54, 50, }, @@ -41562,6 +41908,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 54, 30, }, { 8, 1, 1, 3, 54, 68, }, { 9, 1, 1, 3, 54, 40, }, + { 10, 1, 1, 3, 54, 40, }, + { 11, 1, 1, 3, 54, 40, }, { 0, 1, 1, 3, 62, 58, }, { 2, 1, 1, 3, 62, 40, }, { 1, 1, 1, 3, 62, 48, }, @@ -41572,6 +41920,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 62, 30, }, { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, + { 10, 1, 1, 3, 62, 40, }, + { 11, 1, 1, 3, 62, 40, }, { 0, 1, 1, 3, 102, 54, }, { 2, 1, 1, 3, 102, 40, }, { 1, 1, 1, 3, 102, 70, }, @@ -41582,6 +41932,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 102, 30, }, { 8, 1, 1, 3, 102, 54, }, { 9, 1, 1, 3, 102, 127, }, + { 10, 1, 1, 
3, 102, 30, }, + { 11, 1, 1, 3, 102, 40, }, { 0, 1, 1, 3, 110, 68, }, { 2, 1, 1, 3, 110, 40, }, { 1, 1, 1, 3, 110, 70, }, @@ -41592,6 +41944,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 110, 30, }, { 8, 1, 1, 3, 110, 68, }, { 9, 1, 1, 3, 110, 127, }, + { 10, 1, 1, 3, 110, 30, }, + { 11, 1, 1, 3, 110, 40, }, { 0, 1, 1, 3, 118, 68, }, { 2, 1, 1, 3, 118, 40, }, { 1, 1, 1, 3, 118, 70, }, @@ -41602,6 +41956,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 118, 30, }, { 8, 1, 1, 3, 118, 68, }, { 9, 1, 1, 3, 118, 127, }, + { 10, 1, 1, 3, 118, 30, }, + { 11, 1, 1, 3, 118, 40, }, { 0, 1, 1, 3, 126, 68, }, { 2, 1, 1, 3, 126, 40, }, { 1, 1, 1, 3, 126, 70, }, @@ -41612,6 +41968,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 126, 30, }, { 8, 1, 1, 3, 126, 68, }, { 9, 1, 1, 3, 126, 127, }, + { 10, 1, 1, 3, 126, 30, }, + { 11, 1, 1, 3, 126, 40, }, { 0, 1, 1, 3, 134, 68, }, { 2, 1, 1, 3, 134, 40, }, { 1, 1, 1, 3, 134, 70, }, @@ -41622,6 +41980,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 134, 30, }, { 8, 1, 1, 3, 134, 68, }, { 9, 1, 1, 3, 134, 127, }, + { 10, 1, 1, 3, 134, 30, }, + { 11, 1, 1, 3, 134, 40, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, { 1, 1, 1, 3, 142, 127, }, @@ -41632,8 +41992,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 142, 127, }, { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, + { 10, 1, 1, 3, 142, 127, }, + { 11, 1, 1, 3, 142, 62, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, 30, }, + { 2, 1, 1, 3, 151, 4, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -41641,9 +42003,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, 30, }, + { 9, 1, 1, 3, 151, 4, }, + { 10, 1, 1, 3, 151, 4, }, + { 11, 1, 1, 3, 151, 40, }, { 0, 1, 1, 3, 159, 72, }, - { 2, 1, 1, 3, 159, 30, }, + { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -41651,7 +42015,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, 30, }, + { 9, 1, 1, 3, 159, 4, }, + { 10, 1, 1, 3, 159, 4, }, + { 11, 1, 1, 3, 159, 40, }, { 0, 1, 2, 4, 42, 64, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, @@ -41662,6 +42028,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 42, 54, }, { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, }, + { 10, 1, 2, 4, 42, 64, }, + { 11, 1, 2, 4, 42, 64, }, { 0, 1, 2, 4, 58, 62, }, { 2, 1, 2, 4, 58, 64, }, { 1, 1, 2, 4, 58, 64, }, @@ -41672,6 +42040,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 58, 54, }, { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, + { 10, 1, 2, 4, 58, 64, }, + { 11, 1, 2, 4, 58, 64, }, { 0, 1, 2, 4, 106, 58, }, { 2, 1, 2, 4, 106, 64, }, { 1, 1, 2, 4, 106, 72, }, @@ -41682,6 +42052,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 106, 54, }, { 8, 1, 2, 4, 106, 58, }, { 9, 1, 2, 4, 106, 127, }, + { 10, 1, 2, 4, 106, 54, }, + { 11, 1, 2, 4, 106, 64, }, { 0, 1, 2, 4, 122, 72, }, { 2, 1, 2, 4, 122, 64, }, { 1, 1, 2, 4, 122, 72, }, @@ -41692,6 +42064,8 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 122, 54, }, { 8, 1, 2, 4, 122, 72, }, { 9, 1, 2, 4, 122, 127, }, + { 10, 1, 2, 4, 122, 54, }, + { 11, 1, 2, 4, 122, 64, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, { 1, 1, 2, 4, 138, 127, }, @@ -41702,8 +42076,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 138, 127, }, { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, + { 10, 1, 2, 4, 138, 127, }, + { 11, 1, 2, 4, 138, 72, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, 54, }, + { 2, 1, 2, 4, 155, 28, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 68, }, @@ -41711,7 +42087,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, 54, }, + { 9, 1, 2, 4, 155, 28, }, + { 10, 1, 2, 4, 155, 28, }, + { 11, 1, 2, 4, 155, 64, }, { 0, 1, 2, 5, 42, 54, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, @@ -41722,6 +42100,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 42, 30, }, { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, + { 10, 1, 2, 5, 42, 40, }, + { 11, 1, 2, 5, 42, 40, }, { 0, 1, 2, 5, 58, 52, }, { 2, 1, 2, 5, 58, 40, }, { 1, 1, 2, 5, 58, 50, }, @@ -41732,6 +42112,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 58, 30, }, { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, + { 10, 1, 2, 5, 58, 40, }, + { 11, 1, 2, 5, 58, 40, }, { 0, 1, 2, 5, 106, 50, }, { 2, 1, 2, 5, 106, 40, }, { 1, 1, 2, 5, 106, 72, }, @@ -41742,6 +42124,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 106, 30, }, { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, + { 10, 1, 2, 5, 106, 30, }, + { 11, 1, 2, 5, 106, 40, }, { 0, 1, 2, 5, 122, 66, }, { 2, 1, 2, 5, 122, 40, }, { 1, 1, 2, 5, 122, 72, }, @@ -41752,6 +42136,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 122, 30, }, { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, + { 10, 1, 2, 5, 122, 30, }, + { 11, 1, 2, 5, 122, 40, }, { 0, 1, 2, 5, 138, 66, }, { 2, 1, 2, 5, 138, 127, }, { 1, 1, 2, 5, 138, 127, }, @@ -41762,8 +42148,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 138, 127, }, { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, + { 10, 1, 2, 5, 138, 127, }, + { 11, 1, 2, 5, 138, 60, }, { 0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, 30, }, + { 2, 1, 2, 5, 155, 4, }, { 1, 1, 2, 5, 155, 127, }, { 3, 1, 2, 5, 155, 62, }, { 4, 1, 2, 5, 155, 58, }, @@ -41771,7 +42159,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, 30, }, + { 9, 1, 2, 5, 155, 4, }, + { 10, 1, 2, 5, 155, 4, }, + { 11, 1, 2, 5, 155, 40, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0); @@ -41783,9 +42173,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 1, 72, }, { 4, 0, 0, 0, 1, 76, }, { 5, 0, 0, 0, 1, 56, }, - { 6, 0, 0, 0, 1, 72, }, - { 7, 0, 0, 0, 1, 60, }, - { 8, 0, 0, 0, 1, 72, }, { 9, 0, 0, 0, 1, 60, }, { 0, 0, 0, 0, 2, 72, }, { 2, 0, 0, 0, 2, 56, }, @@ -41793,9 +42180,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 2, 72, }, { 4, 0, 0, 0, 2, 76, }, { 5, 0, 0, 0, 2, 56, }, - { 6, 0, 0, 0, 2, 72, }, - { 7, 0, 0, 0, 2, 60, 
}, - { 8, 0, 0, 0, 2, 72, }, { 9, 0, 0, 0, 2, 60, }, { 0, 0, 0, 0, 3, 76, }, { 2, 0, 0, 0, 3, 56, }, @@ -41803,9 +42187,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 3, 76, }, { 4, 0, 0, 0, 3, 76, }, { 5, 0, 0, 0, 3, 56, }, - { 6, 0, 0, 0, 3, 76, }, - { 7, 0, 0, 0, 3, 60, }, - { 8, 0, 0, 0, 3, 76, }, { 9, 0, 0, 0, 3, 60, }, { 0, 0, 0, 0, 4, 76, }, { 2, 0, 0, 0, 4, 56, }, @@ -41813,9 +42194,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 4, 76, }, { 4, 0, 0, 0, 4, 76, }, { 5, 0, 0, 0, 4, 56, }, - { 6, 0, 0, 0, 4, 76, }, - { 7, 0, 0, 0, 4, 60, }, - { 8, 0, 0, 0, 4, 76, }, { 9, 0, 0, 0, 4, 60, }, { 0, 0, 0, 0, 5, 76, }, { 2, 0, 0, 0, 5, 56, }, @@ -41823,9 +42201,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 5, 76, }, { 4, 0, 0, 0, 5, 76, }, { 5, 0, 0, 0, 5, 56, }, - { 6, 0, 0, 0, 5, 76, }, - { 7, 0, 0, 0, 5, 60, }, - { 8, 0, 0, 0, 5, 76, }, { 9, 0, 0, 0, 5, 60, }, { 0, 0, 0, 0, 6, 76, }, { 2, 0, 0, 0, 6, 56, }, @@ -41833,9 +42208,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 6, 76, }, { 4, 0, 0, 0, 6, 76, }, { 5, 0, 0, 0, 6, 56, }, - { 6, 0, 0, 0, 6, 76, }, - { 7, 0, 0, 0, 6, 60, }, - { 8, 0, 0, 0, 6, 76, }, { 9, 0, 0, 0, 6, 60, }, { 0, 0, 0, 0, 7, 76, }, { 2, 0, 0, 0, 7, 56, }, @@ -41843,9 +42215,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 7, 76, }, { 4, 0, 0, 0, 7, 76, }, { 5, 0, 0, 0, 7, 56, }, - { 6, 0, 0, 0, 7, 76, }, - { 7, 0, 0, 0, 7, 60, }, - { 8, 0, 0, 0, 7, 76, }, { 9, 0, 0, 0, 7, 60, }, { 0, 0, 0, 0, 8, 76, }, { 2, 0, 0, 0, 8, 56, }, @@ -41853,9 +42222,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 8, 76, }, { 4, 0, 0, 0, 8, 76, }, { 5, 0, 0, 0, 8, 56, }, - { 6, 0, 0, 0, 8, 76, }, - { 7, 0, 0, 0, 8, 60, }, - { 8, 0, 0, 0, 8, 76, }, { 9, 0, 0, 0, 8, 60, }, { 0, 0, 0, 0, 9, 76, }, { 2, 0, 0, 0, 9, 56, }, @@ -41863,9 +42229,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 9, 76, }, { 4, 0, 0, 0, 9, 76, }, { 5, 0, 0, 0, 9, 56, }, - { 6, 0, 0, 0, 9, 76, }, - { 7, 0, 0, 0, 9, 60, }, - { 8, 0, 0, 0, 9, 76, }, { 9, 0, 0, 0, 9, 60, }, { 0, 0, 0, 0, 10, 72, }, { 2, 0, 0, 0, 10, 56, }, @@ -41873,9 +42236,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 10, 72, }, { 4, 0, 0, 0, 10, 76, }, { 5, 0, 0, 0, 10, 56, }, - { 6, 0, 0, 0, 10, 72, }, - { 7, 0, 0, 0, 10, 60, }, - { 8, 0, 0, 0, 10, 72, }, { 9, 0, 0, 0, 10, 60, }, { 0, 0, 0, 0, 11, 72, }, { 2, 0, 0, 0, 11, 56, }, @@ -41883,29 +42243,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 11, 72, }, { 4, 0, 0, 0, 11, 76, }, { 5, 0, 0, 0, 11, 56, }, - { 6, 0, 0, 0, 11, 72, }, - { 7, 0, 0, 0, 11, 60, }, - { 8, 0, 0, 0, 11, 72, }, { 9, 0, 0, 0, 11, 60, }, - { 0, 0, 0, 0, 12, 44, }, + { 0, 0, 0, 0, 12, 52, }, { 2, 0, 0, 0, 12, 56, }, { 1, 0, 0, 0, 12, 72, }, { 3, 0, 0, 0, 12, 52, }, { 4, 0, 0, 0, 12, 76, }, { 5, 0, 0, 0, 12, 56, }, - { 6, 0, 0, 0, 12, 52, }, - { 7, 0, 0, 0, 12, 60, }, - { 8, 0, 0, 0, 12, 52, }, { 9, 0, 0, 0, 12, 60, }, - { 0, 0, 0, 0, 13, 40, }, + { 0, 0, 0, 0, 13, 48, }, { 2, 0, 0, 0, 13, 56, }, { 1, 0, 0, 0, 13, 72, }, { 3, 0, 0, 0, 13, 48, }, { 4, 0, 0, 0, 13, 76, }, { 5, 0, 0, 0, 13, 56, }, - { 6, 0, 0, 0, 13, 48, }, - { 7, 0, 0, 0, 13, 60, }, - { 8, 0, 0, 0, 13, 48, }, { 9, 0, 0, 0, 13, 60, }, { 0, 0, 0, 0, 14, 127, }, { 2, 
0, 0, 0, 14, 127, }, @@ -41913,9 +42264,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 14, 127, }, { 4, 0, 0, 0, 14, 127, }, { 5, 0, 0, 0, 14, 127, }, - { 6, 0, 0, 0, 14, 127, }, - { 7, 0, 0, 0, 14, 127, }, - { 8, 0, 0, 0, 14, 127, }, { 9, 0, 0, 0, 14, 127, }, { 0, 0, 0, 1, 1, 52, }, { 2, 0, 0, 1, 1, 60, }, @@ -41923,9 +42271,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 1, 52, }, { 4, 0, 0, 1, 1, 76, }, { 5, 0, 0, 1, 1, 60, }, - { 6, 0, 0, 1, 1, 52, }, - { 7, 0, 0, 1, 1, 60, }, - { 8, 0, 0, 1, 1, 52, }, { 9, 0, 0, 1, 1, 60, }, { 0, 0, 0, 1, 2, 60, }, { 2, 0, 0, 1, 2, 60, }, @@ -41933,9 +42278,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 2, 60, }, { 4, 0, 0, 1, 2, 76, }, { 5, 0, 0, 1, 2, 60, }, - { 6, 0, 0, 1, 2, 60, }, - { 7, 0, 0, 1, 2, 60, }, - { 8, 0, 0, 1, 2, 60, }, { 9, 0, 0, 1, 2, 60, }, { 0, 0, 0, 1, 3, 64, }, { 2, 0, 0, 1, 3, 60, }, @@ -41943,9 +42285,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 3, 64, }, { 4, 0, 0, 1, 3, 76, }, { 5, 0, 0, 1, 3, 60, }, - { 6, 0, 0, 1, 3, 64, }, - { 7, 0, 0, 1, 3, 60, }, - { 8, 0, 0, 1, 3, 64, }, { 9, 0, 0, 1, 3, 60, }, { 0, 0, 0, 1, 4, 68, }, { 2, 0, 0, 1, 4, 60, }, @@ -41953,9 +42292,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 4, 68, }, { 4, 0, 0, 1, 4, 76, }, { 5, 0, 0, 1, 4, 60, }, - { 6, 0, 0, 1, 4, 68, }, - { 7, 0, 0, 1, 4, 60, }, - { 8, 0, 0, 1, 4, 68, }, { 9, 0, 0, 1, 4, 60, }, { 0, 0, 0, 1, 5, 76, }, { 2, 0, 0, 1, 5, 60, }, @@ -41963,9 +42299,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 5, 76, }, { 4, 0, 0, 1, 5, 76, }, { 5, 0, 0, 1, 5, 60, }, - { 6, 0, 0, 1, 5, 76, }, - { 7, 0, 0, 1, 5, 60, }, - { 8, 0, 0, 1, 5, 76, }, { 9, 0, 0, 1, 5, 60, }, { 0, 0, 0, 1, 6, 76, }, { 2, 0, 0, 1, 6, 60, }, @@ -41973,9 +42306,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 6, 76, }, { 4, 0, 0, 1, 6, 76, }, { 5, 0, 0, 1, 6, 60, }, - { 6, 0, 0, 1, 6, 76, }, - { 7, 0, 0, 1, 6, 60, }, - { 8, 0, 0, 1, 6, 76, }, { 9, 0, 0, 1, 6, 60, }, { 0, 0, 0, 1, 7, 76, }, { 2, 0, 0, 1, 7, 60, }, @@ -41983,9 +42313,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 7, 76, }, { 4, 0, 0, 1, 7, 76, }, { 5, 0, 0, 1, 7, 60, }, - { 6, 0, 0, 1, 7, 76, }, - { 7, 0, 0, 1, 7, 60, }, - { 8, 0, 0, 1, 7, 76, }, { 9, 0, 0, 1, 7, 60, }, { 0, 0, 0, 1, 8, 68, }, { 2, 0, 0, 1, 8, 60, }, @@ -41993,9 +42320,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 8, 68, }, { 4, 0, 0, 1, 8, 76, }, { 5, 0, 0, 1, 8, 60, }, - { 6, 0, 0, 1, 8, 68, }, - { 7, 0, 0, 1, 8, 60, }, - { 8, 0, 0, 1, 8, 68, }, { 9, 0, 0, 1, 8, 60, }, { 0, 0, 0, 1, 9, 64, }, { 2, 0, 0, 1, 9, 60, }, @@ -42003,9 +42327,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 9, 64, }, { 4, 0, 0, 1, 9, 76, }, { 5, 0, 0, 1, 9, 60, }, - { 6, 0, 0, 1, 9, 64, }, - { 7, 0, 0, 1, 9, 60, }, - { 8, 0, 0, 1, 9, 64, }, { 9, 0, 0, 1, 9, 60, }, { 0, 0, 0, 1, 10, 60, }, { 2, 0, 0, 1, 10, 60, }, @@ -42013,9 +42334,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 10, 60, }, { 4, 0, 0, 1, 10, 76, }, { 5, 0, 0, 1, 10, 60, }, - { 6, 0, 0, 1, 10, 60, }, - { 7, 0, 0, 1, 10, 60, }, - { 8, 0, 0, 1, 10, 60, }, { 9, 0, 0, 1, 10, 60, }, { 0, 0, 0, 1, 11, 52, }, { 2, 0, 0, 1, 
11, 60, }, @@ -42023,39 +42341,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 11, 52, }, { 4, 0, 0, 1, 11, 76, }, { 5, 0, 0, 1, 11, 60, }, - { 6, 0, 0, 1, 11, 52, }, - { 7, 0, 0, 1, 11, 60, }, - { 8, 0, 0, 1, 11, 52, }, - { 9, 0, 0, 1, 11, 60, }, - { 0, 0, 0, 1, 12, 32, }, + { 9, 0, 0, 1, 11, 52, }, + { 0, 0, 0, 1, 12, 40, }, { 2, 0, 0, 1, 12, 60, }, { 1, 0, 0, 1, 12, 76, }, { 3, 0, 0, 1, 12, 40, }, { 4, 0, 0, 1, 12, 76, }, { 5, 0, 0, 1, 12, 60, }, - { 6, 0, 0, 1, 12, 40, }, - { 7, 0, 0, 1, 12, 60, }, - { 8, 0, 0, 1, 12, 40, }, - { 9, 0, 0, 1, 12, 60, }, - { 0, 0, 0, 1, 13, 20, }, + { 9, 0, 0, 1, 12, 48, }, + { 0, 0, 0, 1, 13, 28, }, { 2, 0, 0, 1, 13, 60, }, { 1, 0, 0, 1, 13, 76, }, { 3, 0, 0, 1, 13, 28, }, { 4, 0, 0, 1, 13, 74, }, { 5, 0, 0, 1, 13, 60, }, - { 6, 0, 0, 1, 13, 28, }, - { 7, 0, 0, 1, 13, 60, }, - { 8, 0, 0, 1, 13, 28, }, - { 9, 0, 0, 1, 13, 60, }, + { 9, 0, 0, 1, 13, 40, }, { 0, 0, 0, 1, 14, 127, }, { 2, 0, 0, 1, 14, 127, }, { 1, 0, 0, 1, 14, 127, }, { 3, 0, 0, 1, 14, 127, }, { 4, 0, 0, 1, 14, 127, }, { 5, 0, 0, 1, 14, 127, }, - { 6, 0, 0, 1, 14, 127, }, - { 7, 0, 0, 1, 14, 127, }, - { 8, 0, 0, 1, 14, 127, }, { 9, 0, 0, 1, 14, 127, }, { 0, 0, 0, 2, 1, 52, }, { 2, 0, 0, 2, 1, 60, }, @@ -42063,9 +42369,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 1, 52, }, { 4, 0, 0, 2, 1, 76, }, { 5, 0, 0, 2, 1, 60, }, - { 6, 0, 0, 2, 1, 52, }, - { 7, 0, 0, 2, 1, 60, }, - { 8, 0, 0, 2, 1, 52, }, { 9, 0, 0, 2, 1, 60, }, { 0, 0, 0, 2, 2, 60, }, { 2, 0, 0, 2, 2, 60, }, @@ -42073,9 +42376,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 2, 60, }, { 4, 0, 0, 2, 2, 76, }, { 5, 0, 0, 2, 2, 60, }, - { 6, 0, 0, 2, 2, 60, }, - { 7, 0, 0, 2, 2, 60, }, - { 8, 0, 0, 2, 2, 60, }, { 9, 0, 0, 2, 2, 60, }, { 0, 0, 0, 2, 3, 64, }, { 2, 0, 0, 2, 3, 60, }, @@ -42083,9 +42383,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 3, 64, }, { 4, 0, 0, 2, 3, 76, }, { 5, 0, 0, 2, 3, 60, }, - { 6, 0, 0, 2, 3, 64, }, - { 7, 0, 0, 2, 3, 60, }, - { 8, 0, 0, 2, 3, 64, }, { 9, 0, 0, 2, 3, 60, }, { 0, 0, 0, 2, 4, 68, }, { 2, 0, 0, 2, 4, 60, }, @@ -42093,9 +42390,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 4, 68, }, { 4, 0, 0, 2, 4, 76, }, { 5, 0, 0, 2, 4, 60, }, - { 6, 0, 0, 2, 4, 68, }, - { 7, 0, 0, 2, 4, 60, }, - { 8, 0, 0, 2, 4, 68, }, { 9, 0, 0, 2, 4, 60, }, { 0, 0, 0, 2, 5, 76, }, { 2, 0, 0, 2, 5, 60, }, @@ -42103,9 +42397,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 5, 76, }, { 4, 0, 0, 2, 5, 76, }, { 5, 0, 0, 2, 5, 60, }, - { 6, 0, 0, 2, 5, 76, }, - { 7, 0, 0, 2, 5, 60, }, - { 8, 0, 0, 2, 5, 76, }, { 9, 0, 0, 2, 5, 60, }, { 0, 0, 0, 2, 6, 76, }, { 2, 0, 0, 2, 6, 60, }, @@ -42113,9 +42404,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 6, 76, }, { 4, 0, 0, 2, 6, 76, }, { 5, 0, 0, 2, 6, 60, }, - { 6, 0, 0, 2, 6, 76, }, - { 7, 0, 0, 2, 6, 60, }, - { 8, 0, 0, 2, 6, 76, }, { 9, 0, 0, 2, 6, 60, }, { 0, 0, 0, 2, 7, 76, }, { 2, 0, 0, 2, 7, 60, }, @@ -42123,9 +42411,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 7, 76, }, { 4, 0, 0, 2, 7, 76, }, { 5, 0, 0, 2, 7, 60, }, - { 6, 0, 0, 2, 7, 76, }, - { 7, 0, 0, 2, 7, 60, }, - { 8, 0, 0, 2, 7, 76, }, { 9, 0, 0, 2, 7, 60, }, { 0, 0, 0, 2, 8, 68, }, { 2, 0, 0, 2, 8, 60, }, @@ -42133,9 +42418,6 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 8, 68, }, { 4, 0, 0, 2, 8, 76, }, { 5, 0, 0, 2, 8, 60, }, - { 6, 0, 0, 2, 8, 68, }, - { 7, 0, 0, 2, 8, 60, }, - { 8, 0, 0, 2, 8, 68, }, { 9, 0, 0, 2, 8, 60, }, { 0, 0, 0, 2, 9, 64, }, { 2, 0, 0, 2, 9, 60, }, @@ -42143,9 +42425,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 9, 64, }, { 4, 0, 0, 2, 9, 76, }, { 5, 0, 0, 2, 9, 60, }, - { 6, 0, 0, 2, 9, 64, }, - { 7, 0, 0, 2, 9, 60, }, - { 8, 0, 0, 2, 9, 64, }, { 9, 0, 0, 2, 9, 60, }, { 0, 0, 0, 2, 10, 60, }, { 2, 0, 0, 2, 10, 60, }, @@ -42153,9 +42432,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 10, 60, }, { 4, 0, 0, 2, 10, 76, }, { 5, 0, 0, 2, 10, 60, }, - { 6, 0, 0, 2, 10, 60, }, - { 7, 0, 0, 2, 10, 60, }, - { 8, 0, 0, 2, 10, 60, }, { 9, 0, 0, 2, 10, 60, }, { 0, 0, 0, 2, 11, 52, }, { 2, 0, 0, 2, 11, 60, }, @@ -42163,39 +42439,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 11, 52, }, { 4, 0, 0, 2, 11, 76, }, { 5, 0, 0, 2, 11, 60, }, - { 6, 0, 0, 2, 11, 52, }, - { 7, 0, 0, 2, 11, 60, }, - { 8, 0, 0, 2, 11, 52, }, - { 9, 0, 0, 2, 11, 60, }, - { 0, 0, 0, 2, 12, 32, }, + { 9, 0, 0, 2, 11, 52, }, + { 0, 0, 0, 2, 12, 40, }, { 2, 0, 0, 2, 12, 60, }, { 1, 0, 0, 2, 12, 76, }, { 3, 0, 0, 2, 12, 40, }, { 4, 0, 0, 2, 12, 76, }, { 5, 0, 0, 2, 12, 60, }, - { 6, 0, 0, 2, 12, 40, }, - { 7, 0, 0, 2, 12, 60, }, - { 8, 0, 0, 2, 12, 40, }, - { 9, 0, 0, 2, 12, 60, }, - { 0, 0, 0, 2, 13, 20, }, + { 9, 0, 0, 2, 12, 48, }, + { 0, 0, 0, 2, 13, 28, }, { 2, 0, 0, 2, 13, 60, }, { 1, 0, 0, 2, 13, 76, }, { 3, 0, 0, 2, 13, 28, }, { 4, 0, 0, 2, 13, 74, }, { 5, 0, 0, 2, 13, 60, }, - { 6, 0, 0, 2, 13, 28, }, - { 7, 0, 0, 2, 13, 60, }, - { 8, 0, 0, 2, 13, 28, }, - { 9, 0, 0, 2, 13, 60, }, + { 9, 0, 0, 2, 13, 40, }, { 0, 0, 0, 2, 14, 127, }, { 2, 0, 0, 2, 14, 127, }, { 1, 0, 0, 2, 14, 127, }, { 3, 0, 0, 2, 14, 127, }, { 4, 0, 0, 2, 14, 127, }, { 5, 0, 0, 2, 14, 127, }, - { 6, 0, 0, 2, 14, 127, }, - { 7, 0, 0, 2, 14, 127, }, - { 8, 0, 0, 2, 14, 127, }, { 9, 0, 0, 2, 14, 127, }, { 0, 0, 0, 3, 1, 52, }, { 2, 0, 0, 3, 1, 36, }, @@ -42203,9 +42467,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 1, 52, }, { 4, 0, 0, 3, 1, 72, }, { 5, 0, 0, 3, 1, 36, }, - { 6, 0, 0, 3, 1, 52, }, - { 7, 0, 0, 3, 1, 36, }, - { 8, 0, 0, 3, 1, 52, }, { 9, 0, 0, 3, 1, 36, }, { 0, 0, 0, 3, 2, 60, }, { 2, 0, 0, 3, 2, 36, }, @@ -42213,9 +42474,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 2, 60, }, { 4, 0, 0, 3, 2, 72, }, { 5, 0, 0, 3, 2, 36, }, - { 6, 0, 0, 3, 2, 60, }, - { 7, 0, 0, 3, 2, 36, }, - { 8, 0, 0, 3, 2, 60, }, { 9, 0, 0, 3, 2, 36, }, { 0, 0, 0, 3, 3, 64, }, { 2, 0, 0, 3, 3, 36, }, @@ -42223,9 +42481,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 3, 64, }, { 4, 0, 0, 3, 3, 72, }, { 5, 0, 0, 3, 3, 36, }, - { 6, 0, 0, 3, 3, 64, }, - { 7, 0, 0, 3, 3, 36, }, - { 8, 0, 0, 3, 3, 64, }, { 9, 0, 0, 3, 3, 36, }, { 0, 0, 0, 3, 4, 68, }, { 2, 0, 0, 3, 4, 36, }, @@ -42233,9 +42488,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 4, 68, }, { 4, 0, 0, 3, 4, 72, }, { 5, 0, 0, 3, 4, 36, }, - { 6, 0, 0, 3, 4, 68, }, - { 7, 0, 0, 3, 4, 36, }, - { 8, 0, 0, 3, 4, 68, }, { 9, 0, 0, 3, 4, 36, }, { 0, 0, 0, 3, 5, 76, }, { 2, 0, 0, 3, 5, 36, }, @@ -42243,9 +42495,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = 
{ { 3, 0, 0, 3, 5, 76, }, { 4, 0, 0, 3, 5, 72, }, { 5, 0, 0, 3, 5, 36, }, - { 6, 0, 0, 3, 5, 76, }, - { 7, 0, 0, 3, 5, 36, }, - { 8, 0, 0, 3, 5, 76, }, { 9, 0, 0, 3, 5, 36, }, { 0, 0, 0, 3, 6, 76, }, { 2, 0, 0, 3, 6, 36, }, @@ -42253,9 +42502,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 6, 76, }, { 4, 0, 0, 3, 6, 72, }, { 5, 0, 0, 3, 6, 36, }, - { 6, 0, 0, 3, 6, 76, }, - { 7, 0, 0, 3, 6, 36, }, - { 8, 0, 0, 3, 6, 76, }, { 9, 0, 0, 3, 6, 36, }, { 0, 0, 0, 3, 7, 76, }, { 2, 0, 0, 3, 7, 36, }, @@ -42263,9 +42509,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 7, 76, }, { 4, 0, 0, 3, 7, 72, }, { 5, 0, 0, 3, 7, 36, }, - { 6, 0, 0, 3, 7, 76, }, - { 7, 0, 0, 3, 7, 36, }, - { 8, 0, 0, 3, 7, 76, }, { 9, 0, 0, 3, 7, 36, }, { 0, 0, 0, 3, 8, 68, }, { 2, 0, 0, 3, 8, 36, }, @@ -42273,9 +42516,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 8, 68, }, { 4, 0, 0, 3, 8, 72, }, { 5, 0, 0, 3, 8, 36, }, - { 6, 0, 0, 3, 8, 68, }, - { 7, 0, 0, 3, 8, 36, }, - { 8, 0, 0, 3, 8, 68, }, { 9, 0, 0, 3, 8, 36, }, { 0, 0, 0, 3, 9, 64, }, { 2, 0, 0, 3, 9, 36, }, @@ -42283,9 +42523,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 9, 64, }, { 4, 0, 0, 3, 9, 72, }, { 5, 0, 0, 3, 9, 36, }, - { 6, 0, 0, 3, 9, 64, }, - { 7, 0, 0, 3, 9, 36, }, - { 8, 0, 0, 3, 9, 64, }, { 9, 0, 0, 3, 9, 36, }, { 0, 0, 0, 3, 10, 60, }, { 2, 0, 0, 3, 10, 36, }, @@ -42293,9 +42530,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 10, 60, }, { 4, 0, 0, 3, 10, 72, }, { 5, 0, 0, 3, 10, 36, }, - { 6, 0, 0, 3, 10, 60, }, - { 7, 0, 0, 3, 10, 36, }, - { 8, 0, 0, 3, 10, 60, }, { 9, 0, 0, 3, 10, 36, }, { 0, 0, 0, 3, 11, 52, }, { 2, 0, 0, 3, 11, 36, }, @@ -42303,39 +42537,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 11, 52, }, { 4, 0, 0, 3, 11, 72, }, { 5, 0, 0, 3, 11, 36, }, - { 6, 0, 0, 3, 11, 52, }, - { 7, 0, 0, 3, 11, 36, }, - { 8, 0, 0, 3, 11, 52, }, - { 9, 0, 0, 3, 11, 36, }, - { 0, 0, 0, 3, 12, 32, }, + { 9, 0, 0, 3, 11, 40, }, + { 0, 0, 0, 3, 12, 40, }, { 2, 0, 0, 3, 12, 36, }, { 1, 0, 0, 3, 12, 66, }, { 3, 0, 0, 3, 12, 40, }, { 4, 0, 0, 3, 12, 72, }, { 5, 0, 0, 3, 12, 36, }, - { 6, 0, 0, 3, 12, 40, }, - { 7, 0, 0, 3, 12, 36, }, - { 8, 0, 0, 3, 12, 40, }, { 9, 0, 0, 3, 12, 36, }, - { 0, 0, 0, 3, 13, 20, }, + { 0, 0, 0, 3, 13, 28, }, { 2, 0, 0, 3, 13, 36, }, { 1, 0, 0, 3, 13, 66, }, { 3, 0, 0, 3, 13, 28, }, { 4, 0, 0, 3, 13, 68, }, { 5, 0, 0, 3, 13, 36, }, - { 6, 0, 0, 3, 13, 28, }, - { 7, 0, 0, 3, 13, 36, }, - { 8, 0, 0, 3, 13, 28, }, - { 9, 0, 0, 3, 13, 36, }, + { 9, 0, 0, 3, 13, 28, }, { 0, 0, 0, 3, 14, 127, }, { 2, 0, 0, 3, 14, 127, }, { 1, 0, 0, 3, 14, 127, }, { 3, 0, 0, 3, 14, 127, }, { 4, 0, 0, 3, 14, 127, }, { 5, 0, 0, 3, 14, 127, }, - { 6, 0, 0, 3, 14, 127, }, - { 7, 0, 0, 3, 14, 127, }, - { 8, 0, 0, 3, 14, 127, }, { 9, 0, 0, 3, 14, 127, }, { 0, 0, 1, 2, 1, 127, }, { 2, 0, 1, 2, 1, 127, }, @@ -42343,29 +42565,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 1, 127, }, { 4, 0, 1, 2, 1, 127, }, { 5, 0, 1, 2, 1, 127, }, - { 6, 0, 1, 2, 1, 127, }, - { 7, 0, 1, 2, 1, 127, }, - { 8, 0, 1, 2, 1, 127, }, - { 9, 0, 1, 2, 1, 127, }, + { 9, 0, 1, 2, 1, 60, }, { 0, 0, 1, 2, 2, 127, }, { 2, 0, 1, 2, 2, 127, }, { 1, 0, 1, 2, 2, 127, }, { 3, 0, 1, 2, 2, 127, }, { 4, 0, 1, 2, 2, 127, }, { 5, 0, 1, 2, 2, 127, }, - { 6, 0, 1, 2, 2, 127, }, - { 7, 
0, 1, 2, 2, 127, }, - { 8, 0, 1, 2, 2, 127, }, - { 9, 0, 1, 2, 2, 127, }, + { 9, 0, 1, 2, 2, 60, }, { 0, 0, 1, 2, 3, 52, }, { 2, 0, 1, 2, 3, 60, }, { 1, 0, 1, 2, 3, 72, }, { 3, 0, 1, 2, 3, 52, }, { 4, 0, 1, 2, 3, 72, }, { 5, 0, 1, 2, 3, 60, }, - { 6, 0, 1, 2, 3, 52, }, - { 7, 0, 1, 2, 3, 60, }, - { 8, 0, 1, 2, 3, 52, }, { 9, 0, 1, 2, 3, 60, }, { 0, 0, 1, 2, 4, 52, }, { 2, 0, 1, 2, 4, 60, }, @@ -42373,9 +42586,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 4, 52, }, { 4, 0, 1, 2, 4, 72, }, { 5, 0, 1, 2, 4, 60, }, - { 6, 0, 1, 2, 4, 52, }, - { 7, 0, 1, 2, 4, 60, }, - { 8, 0, 1, 2, 4, 52, }, { 9, 0, 1, 2, 4, 60, }, { 0, 0, 1, 2, 5, 60, }, { 2, 0, 1, 2, 5, 60, }, @@ -42383,9 +42593,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 5, 60, }, { 4, 0, 1, 2, 5, 72, }, { 5, 0, 1, 2, 5, 60, }, - { 6, 0, 1, 2, 5, 60, }, - { 7, 0, 1, 2, 5, 60, }, - { 8, 0, 1, 2, 5, 60, }, { 9, 0, 1, 2, 5, 60, }, { 0, 0, 1, 2, 6, 64, }, { 2, 0, 1, 2, 6, 60, }, @@ -42393,9 +42600,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 6, 64, }, { 4, 0, 1, 2, 6, 72, }, { 5, 0, 1, 2, 6, 60, }, - { 6, 0, 1, 2, 6, 64, }, - { 7, 0, 1, 2, 6, 60, }, - { 8, 0, 1, 2, 6, 64, }, { 9, 0, 1, 2, 6, 60, }, { 0, 0, 1, 2, 7, 60, }, { 2, 0, 1, 2, 7, 60, }, @@ -42403,9 +42607,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 7, 60, }, { 4, 0, 1, 2, 7, 72, }, { 5, 0, 1, 2, 7, 60, }, - { 6, 0, 1, 2, 7, 60, }, - { 7, 0, 1, 2, 7, 60, }, - { 8, 0, 1, 2, 7, 60, }, { 9, 0, 1, 2, 7, 60, }, { 0, 0, 1, 2, 8, 52, }, { 2, 0, 1, 2, 8, 60, }, @@ -42413,9 +42614,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 8, 52, }, { 4, 0, 1, 2, 8, 72, }, { 5, 0, 1, 2, 8, 60, }, - { 6, 0, 1, 2, 8, 52, }, - { 7, 0, 1, 2, 8, 60, }, - { 8, 0, 1, 2, 8, 52, }, { 9, 0, 1, 2, 8, 60, }, { 0, 0, 1, 2, 9, 52, }, { 2, 0, 1, 2, 9, 60, }, @@ -42423,9 +42621,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 9, 52, }, { 4, 0, 1, 2, 9, 72, }, { 5, 0, 1, 2, 9, 60, }, - { 6, 0, 1, 2, 9, 52, }, - { 7, 0, 1, 2, 9, 60, }, - { 8, 0, 1, 2, 9, 52, }, { 9, 0, 1, 2, 9, 60, }, { 0, 0, 1, 2, 10, 40, }, { 2, 0, 1, 2, 10, 60, }, @@ -42433,9 +42628,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 10, 40, }, { 4, 0, 1, 2, 10, 72, }, { 5, 0, 1, 2, 10, 60, }, - { 6, 0, 1, 2, 10, 40, }, - { 7, 0, 1, 2, 10, 60, }, - { 8, 0, 1, 2, 10, 40, }, { 9, 0, 1, 2, 10, 60, }, { 0, 0, 1, 2, 11, 28, }, { 2, 0, 1, 2, 11, 60, }, @@ -42443,39 +42635,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 11, 28, }, { 4, 0, 1, 2, 11, 70, }, { 5, 0, 1, 2, 11, 60, }, - { 6, 0, 1, 2, 11, 28, }, - { 7, 0, 1, 2, 11, 60, }, - { 8, 0, 1, 2, 11, 28, }, - { 9, 0, 1, 2, 11, 60, }, + { 9, 0, 1, 2, 11, 44, }, { 0, 0, 1, 2, 12, 127, }, { 2, 0, 1, 2, 12, 127, }, { 1, 0, 1, 2, 12, 127, }, { 3, 0, 1, 2, 12, 127, }, { 4, 0, 1, 2, 12, 127, }, { 5, 0, 1, 2, 12, 127, }, - { 6, 0, 1, 2, 12, 127, }, - { 7, 0, 1, 2, 12, 127, }, - { 8, 0, 1, 2, 12, 127, }, - { 9, 0, 1, 2, 12, 127, }, + { 9, 0, 1, 2, 12, 44, }, { 0, 0, 1, 2, 13, 127, }, { 2, 0, 1, 2, 13, 127, }, { 1, 0, 1, 2, 13, 127, }, { 3, 0, 1, 2, 13, 127, }, { 4, 0, 1, 2, 13, 127, }, { 5, 0, 1, 2, 13, 127, }, - { 6, 0, 1, 2, 13, 127, }, - { 7, 0, 1, 2, 13, 127, }, - { 8, 0, 1, 2, 13, 127, }, - { 9, 0, 1, 2, 13, 127, }, + { 9, 0, 1, 2, 13, 20, }, { 0, 0, 
1, 2, 14, 127, }, { 2, 0, 1, 2, 14, 127, }, { 1, 0, 1, 2, 14, 127, }, { 3, 0, 1, 2, 14, 127, }, { 4, 0, 1, 2, 14, 127, }, { 5, 0, 1, 2, 14, 127, }, - { 6, 0, 1, 2, 14, 127, }, - { 7, 0, 1, 2, 14, 127, }, - { 8, 0, 1, 2, 14, 127, }, { 9, 0, 1, 2, 14, 127, }, { 0, 0, 1, 3, 1, 127, }, { 2, 0, 1, 3, 1, 127, }, @@ -42483,29 +42663,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 1, 127, }, { 4, 0, 1, 3, 1, 127, }, { 5, 0, 1, 3, 1, 127, }, - { 6, 0, 1, 3, 1, 127, }, - { 7, 0, 1, 3, 1, 127, }, - { 8, 0, 1, 3, 1, 127, }, - { 9, 0, 1, 3, 1, 127, }, + { 9, 0, 1, 3, 1, 36, }, { 0, 0, 1, 3, 2, 127, }, { 2, 0, 1, 3, 2, 127, }, { 1, 0, 1, 3, 2, 127, }, { 3, 0, 1, 3, 2, 127, }, { 4, 0, 1, 3, 2, 127, }, { 5, 0, 1, 3, 2, 127, }, - { 6, 0, 1, 3, 2, 127, }, - { 7, 0, 1, 3, 2, 127, }, - { 8, 0, 1, 3, 2, 127, }, - { 9, 0, 1, 3, 2, 127, }, + { 9, 0, 1, 3, 2, 36, }, { 0, 0, 1, 3, 3, 48, }, { 2, 0, 1, 3, 3, 36, }, { 1, 0, 1, 3, 3, 66, }, { 3, 0, 1, 3, 3, 48, }, { 4, 0, 1, 3, 3, 68, }, { 5, 0, 1, 3, 3, 36, }, - { 6, 0, 1, 3, 3, 48, }, - { 7, 0, 1, 3, 3, 36, }, - { 8, 0, 1, 3, 3, 48, }, { 9, 0, 1, 3, 3, 36, }, { 0, 0, 1, 3, 4, 48, }, { 2, 0, 1, 3, 4, 36, }, @@ -42513,9 +42684,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 4, 48, }, { 4, 0, 1, 3, 4, 70, }, { 5, 0, 1, 3, 4, 36, }, - { 6, 0, 1, 3, 4, 48, }, - { 7, 0, 1, 3, 4, 36, }, - { 8, 0, 1, 3, 4, 48, }, { 9, 0, 1, 3, 4, 36, }, { 0, 0, 1, 3, 5, 60, }, { 2, 0, 1, 3, 5, 36, }, @@ -42523,9 +42691,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 5, 60, }, { 4, 0, 1, 3, 5, 70, }, { 5, 0, 1, 3, 5, 36, }, - { 6, 0, 1, 3, 5, 60, }, - { 7, 0, 1, 3, 5, 36, }, - { 8, 0, 1, 3, 5, 60, }, { 9, 0, 1, 3, 5, 36, }, { 0, 0, 1, 3, 6, 64, }, { 2, 0, 1, 3, 6, 36, }, @@ -42533,9 +42698,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 6, 64, }, { 4, 0, 1, 3, 6, 70, }, { 5, 0, 1, 3, 6, 36, }, - { 6, 0, 1, 3, 6, 64, }, - { 7, 0, 1, 3, 6, 36, }, - { 8, 0, 1, 3, 6, 64, }, { 9, 0, 1, 3, 6, 36, }, { 0, 0, 1, 3, 7, 60, }, { 2, 0, 1, 3, 7, 36, }, @@ -42543,9 +42705,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 7, 60, }, { 4, 0, 1, 3, 7, 70, }, { 5, 0, 1, 3, 7, 36, }, - { 6, 0, 1, 3, 7, 60, }, - { 7, 0, 1, 3, 7, 36, }, - { 8, 0, 1, 3, 7, 60, }, { 9, 0, 1, 3, 7, 36, }, { 0, 0, 1, 3, 8, 52, }, { 2, 0, 1, 3, 8, 36, }, @@ -42553,9 +42712,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 8, 52, }, { 4, 0, 1, 3, 8, 70, }, { 5, 0, 1, 3, 8, 36, }, - { 6, 0, 1, 3, 8, 52, }, - { 7, 0, 1, 3, 8, 36, }, - { 8, 0, 1, 3, 8, 52, }, { 9, 0, 1, 3, 8, 36, }, { 0, 0, 1, 3, 9, 52, }, { 2, 0, 1, 3, 9, 36, }, @@ -42563,9 +42719,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 9, 52, }, { 4, 0, 1, 3, 9, 70, }, { 5, 0, 1, 3, 9, 36, }, - { 6, 0, 1, 3, 9, 52, }, - { 7, 0, 1, 3, 9, 36, }, - { 8, 0, 1, 3, 9, 52, }, { 9, 0, 1, 3, 9, 36, }, { 0, 0, 1, 3, 10, 40, }, { 2, 0, 1, 3, 10, 36, }, @@ -42573,9 +42726,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 10, 40, }, { 4, 0, 1, 3, 10, 70, }, { 5, 0, 1, 3, 10, 36, }, - { 6, 0, 1, 3, 10, 40, }, - { 7, 0, 1, 3, 10, 36, }, - { 8, 0, 1, 3, 10, 40, }, { 9, 0, 1, 3, 10, 36, }, { 0, 0, 1, 3, 11, 26, }, { 2, 0, 1, 3, 11, 36, }, @@ -42583,39 +42733,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 
1, 3, 11, 26, }, { 4, 0, 1, 3, 11, 66, }, { 5, 0, 1, 3, 11, 36, }, - { 6, 0, 1, 3, 11, 26, }, - { 7, 0, 1, 3, 11, 36, }, - { 8, 0, 1, 3, 11, 26, }, - { 9, 0, 1, 3, 11, 36, }, + { 9, 0, 1, 3, 11, 32, }, { 0, 0, 1, 3, 12, 127, }, { 2, 0, 1, 3, 12, 127, }, { 1, 0, 1, 3, 12, 127, }, { 3, 0, 1, 3, 12, 127, }, { 4, 0, 1, 3, 12, 127, }, { 5, 0, 1, 3, 12, 127, }, - { 6, 0, 1, 3, 12, 127, }, - { 7, 0, 1, 3, 12, 127, }, - { 8, 0, 1, 3, 12, 127, }, - { 9, 0, 1, 3, 12, 127, }, + { 9, 0, 1, 3, 12, 32, }, { 0, 0, 1, 3, 13, 127, }, { 2, 0, 1, 3, 13, 127, }, { 1, 0, 1, 3, 13, 127, }, { 3, 0, 1, 3, 13, 127, }, { 4, 0, 1, 3, 13, 127, }, { 5, 0, 1, 3, 13, 127, }, - { 6, 0, 1, 3, 13, 127, }, - { 7, 0, 1, 3, 13, 127, }, - { 8, 0, 1, 3, 13, 127, }, - { 9, 0, 1, 3, 13, 127, }, + { 9, 0, 1, 3, 13, 8, }, { 0, 0, 1, 3, 14, 127, }, { 2, 0, 1, 3, 14, 127, }, { 1, 0, 1, 3, 14, 127, }, { 3, 0, 1, 3, 14, 127, }, { 4, 0, 1, 3, 14, 127, }, { 5, 0, 1, 3, 14, 127, }, - { 6, 0, 1, 3, 14, 127, }, - { 7, 0, 1, 3, 14, 127, }, - { 8, 0, 1, 3, 14, 127, }, { 9, 0, 1, 3, 14, 127, }, { 0, 1, 0, 1, 36, 74, }, { 2, 1, 0, 1, 36, 58, }, @@ -42623,89 +42761,62 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 36, 62, }, { 4, 1, 0, 1, 36, 74, }, { 5, 1, 0, 1, 36, 58, }, - { 6, 1, 0, 1, 36, 64, }, - { 7, 1, 0, 1, 36, 54, }, - { 8, 1, 0, 1, 36, 62, }, - { 9, 1, 0, 1, 36, 62, }, + { 9, 1, 0, 1, 36, 64, }, { 0, 1, 0, 1, 40, 76, }, { 2, 1, 0, 1, 40, 58, }, { 1, 1, 0, 1, 40, 62, }, { 3, 1, 0, 1, 40, 62, }, { 4, 1, 0, 1, 40, 76, }, { 5, 1, 0, 1, 40, 58, }, - { 6, 1, 0, 1, 40, 64, }, - { 7, 1, 0, 1, 40, 54, }, - { 8, 1, 0, 1, 40, 62, }, - { 9, 1, 0, 1, 40, 62, }, + { 9, 1, 0, 1, 40, 64, }, { 0, 1, 0, 1, 44, 76, }, { 2, 1, 0, 1, 44, 58, }, { 1, 1, 0, 1, 44, 62, }, { 3, 1, 0, 1, 44, 62, }, { 4, 1, 0, 1, 44, 76, }, { 5, 1, 0, 1, 44, 58, }, - { 6, 1, 0, 1, 44, 64, }, - { 7, 1, 0, 1, 44, 54, }, - { 8, 1, 0, 1, 44, 62, }, - { 9, 1, 0, 1, 44, 62, }, + { 9, 1, 0, 1, 44, 64, }, { 0, 1, 0, 1, 48, 76, }, { 2, 1, 0, 1, 48, 58, }, { 1, 1, 0, 1, 48, 62, }, { 3, 1, 0, 1, 48, 62, }, { 4, 1, 0, 1, 48, 58, }, { 5, 1, 0, 1, 48, 58, }, - { 6, 1, 0, 1, 48, 64, }, - { 7, 1, 0, 1, 48, 54, }, - { 8, 1, 0, 1, 48, 62, }, - { 9, 1, 0, 1, 48, 62, }, + { 9, 1, 0, 1, 48, 64, }, { 0, 1, 0, 1, 52, 76, }, { 2, 1, 0, 1, 52, 58, }, { 1, 1, 0, 1, 52, 62, }, { 3, 1, 0, 1, 52, 64, }, { 4, 1, 0, 1, 52, 76, }, { 5, 1, 0, 1, 52, 58, }, - { 6, 1, 0, 1, 52, 76, }, - { 7, 1, 0, 1, 52, 54, }, - { 8, 1, 0, 1, 52, 76, }, - { 9, 1, 0, 1, 52, 62, }, + { 9, 1, 0, 1, 52, 64, }, { 0, 1, 0, 1, 56, 76, }, { 2, 1, 0, 1, 56, 58, }, { 1, 1, 0, 1, 56, 62, }, { 3, 1, 0, 1, 56, 64, }, { 4, 1, 0, 1, 56, 76, }, { 5, 1, 0, 1, 56, 58, }, - { 6, 1, 0, 1, 56, 76, }, - { 7, 1, 0, 1, 56, 54, }, - { 8, 1, 0, 1, 56, 76, }, - { 9, 1, 0, 1, 56, 62, }, + { 9, 1, 0, 1, 56, 64, }, { 0, 1, 0, 1, 60, 76, }, { 2, 1, 0, 1, 60, 58, }, { 1, 1, 0, 1, 60, 62, }, { 3, 1, 0, 1, 60, 64, }, { 4, 1, 0, 1, 60, 76, }, { 5, 1, 0, 1, 60, 58, }, - { 6, 1, 0, 1, 60, 76, }, - { 7, 1, 0, 1, 60, 54, }, - { 8, 1, 0, 1, 60, 76, }, - { 9, 1, 0, 1, 60, 62, }, + { 9, 1, 0, 1, 60, 64, }, { 0, 1, 0, 1, 64, 76, }, { 2, 1, 0, 1, 64, 58, }, { 1, 1, 0, 1, 64, 62, }, { 3, 1, 0, 1, 64, 64, }, { 4, 1, 0, 1, 64, 76, }, { 5, 1, 0, 1, 64, 58, }, - { 6, 1, 0, 1, 64, 74, }, - { 7, 1, 0, 1, 64, 54, }, - { 8, 1, 0, 1, 64, 74, }, - { 9, 1, 0, 1, 64, 62, }, + { 9, 1, 0, 1, 64, 64, }, { 0, 1, 0, 1, 100, 68, }, { 2, 1, 0, 1, 100, 58, }, { 1, 1, 0, 1, 100, 76, }, { 3, 1, 0, 1, 100, 68, }, { 4, 1, 0, 1, 100, 76, }, { 5, 1, 0, 
1, 100, 58, }, - { 6, 1, 0, 1, 100, 72, }, - { 7, 1, 0, 1, 100, 54, }, - { 8, 1, 0, 1, 100, 72, }, { 9, 1, 0, 1, 100, 127, }, { 0, 1, 0, 1, 104, 76, }, { 2, 1, 0, 1, 104, 58, }, @@ -42713,9 +42824,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 104, 76, }, { 4, 1, 0, 1, 104, 76, }, { 5, 1, 0, 1, 104, 58, }, - { 6, 1, 0, 1, 104, 76, }, - { 7, 1, 0, 1, 104, 54, }, - { 8, 1, 0, 1, 104, 76, }, { 9, 1, 0, 1, 104, 127, }, { 0, 1, 0, 1, 108, 76, }, { 2, 1, 0, 1, 108, 58, }, @@ -42723,9 +42831,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 108, 76, }, { 4, 1, 0, 1, 108, 76, }, { 5, 1, 0, 1, 108, 58, }, - { 6, 1, 0, 1, 108, 76, }, - { 7, 1, 0, 1, 108, 54, }, - { 8, 1, 0, 1, 108, 76, }, { 9, 1, 0, 1, 108, 127, }, { 0, 1, 0, 1, 112, 76, }, { 2, 1, 0, 1, 112, 58, }, @@ -42733,9 +42838,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 112, 76, }, { 4, 1, 0, 1, 112, 76, }, { 5, 1, 0, 1, 112, 58, }, - { 6, 1, 0, 1, 112, 76, }, - { 7, 1, 0, 1, 112, 54, }, - { 8, 1, 0, 1, 112, 76, }, { 9, 1, 0, 1, 112, 127, }, { 0, 1, 0, 1, 116, 76, }, { 2, 1, 0, 1, 116, 58, }, @@ -42743,9 +42845,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 116, 76, }, { 4, 1, 0, 1, 116, 76, }, { 5, 1, 0, 1, 116, 58, }, - { 6, 1, 0, 1, 116, 76, }, - { 7, 1, 0, 1, 116, 54, }, - { 8, 1, 0, 1, 116, 76, }, { 9, 1, 0, 1, 116, 127, }, { 0, 1, 0, 1, 120, 76, }, { 2, 1, 0, 1, 120, 58, }, @@ -42753,9 +42852,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 120, 127, }, { 4, 1, 0, 1, 120, 76, }, { 5, 1, 0, 1, 120, 127, }, - { 6, 1, 0, 1, 120, 76, }, - { 7, 1, 0, 1, 120, 54, }, - { 8, 1, 0, 1, 120, 76, }, { 9, 1, 0, 1, 120, 127, }, { 0, 1, 0, 1, 124, 76, }, { 2, 1, 0, 1, 124, 58, }, @@ -42763,9 +42859,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 124, 127, }, { 4, 1, 0, 1, 124, 76, }, { 5, 1, 0, 1, 124, 127, }, - { 6, 1, 0, 1, 124, 76, }, - { 7, 1, 0, 1, 124, 54, }, - { 8, 1, 0, 1, 124, 76, }, { 9, 1, 0, 1, 124, 127, }, { 0, 1, 0, 1, 128, 76, }, { 2, 1, 0, 1, 128, 58, }, @@ -42773,9 +42866,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 128, 127, }, { 4, 1, 0, 1, 128, 76, }, { 5, 1, 0, 1, 128, 127, }, - { 6, 1, 0, 1, 128, 76, }, - { 7, 1, 0, 1, 128, 54, }, - { 8, 1, 0, 1, 128, 76, }, { 9, 1, 0, 1, 128, 127, }, { 0, 1, 0, 1, 132, 76, }, { 2, 1, 0, 1, 132, 58, }, @@ -42783,9 +42873,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 132, 76, }, { 4, 1, 0, 1, 132, 76, }, { 5, 1, 0, 1, 132, 58, }, - { 6, 1, 0, 1, 132, 76, }, - { 7, 1, 0, 1, 132, 54, }, - { 8, 1, 0, 1, 132, 76, }, { 9, 1, 0, 1, 132, 127, }, { 0, 1, 0, 1, 136, 76, }, { 2, 1, 0, 1, 136, 58, }, @@ -42793,9 +42880,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 136, 76, }, { 4, 1, 0, 1, 136, 76, }, { 5, 1, 0, 1, 136, 58, }, - { 6, 1, 0, 1, 136, 76, }, - { 7, 1, 0, 1, 136, 54, }, - { 8, 1, 0, 1, 136, 76, }, { 9, 1, 0, 1, 136, 127, }, { 0, 1, 0, 1, 140, 74, }, { 2, 1, 0, 1, 140, 58, }, @@ -42803,9 +42887,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 140, 74, }, { 4, 1, 0, 1, 140, 76, }, { 5, 1, 0, 1, 140, 58, }, - { 6, 1, 0, 1, 140, 72, }, - { 7, 1, 0, 1, 140, 54, }, - { 8, 1, 0, 1, 140, 72, }, { 9, 1, 0, 1, 140, 127, }, { 0, 1, 0, 1, 144, 76, }, { 2, 
1, 0, 1, 144, 127, }, @@ -42813,9 +42894,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 144, 76, }, { 4, 1, 0, 1, 144, 76, }, { 5, 1, 0, 1, 144, 127, }, - { 6, 1, 0, 1, 144, 76, }, - { 7, 1, 0, 1, 144, 127, }, - { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, { 2, 1, 0, 1, 149, 28, }, @@ -42823,139 +42901,97 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, { 5, 1, 0, 1, 149, 76, }, - { 6, 1, 0, 1, 149, 76, }, - { 7, 1, 0, 1, 149, 54, }, - { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, 28, }, + { 9, 1, 0, 1, 149, 76, }, { 0, 1, 0, 1, 153, 76, }, { 2, 1, 0, 1, 153, 28, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, { 5, 1, 0, 1, 153, 76, }, - { 6, 1, 0, 1, 153, 76, }, - { 7, 1, 0, 1, 153, 54, }, - { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, 28, }, + { 9, 1, 0, 1, 153, 76, }, { 0, 1, 0, 1, 157, 76, }, { 2, 1, 0, 1, 157, 28, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, { 5, 1, 0, 1, 157, 76, }, - { 6, 1, 0, 1, 157, 76, }, - { 7, 1, 0, 1, 157, 54, }, - { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, 28, }, + { 9, 1, 0, 1, 157, 76, }, { 0, 1, 0, 1, 161, 76, }, { 2, 1, 0, 1, 161, 28, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, { 5, 1, 0, 1, 161, 76, }, - { 6, 1, 0, 1, 161, 76, }, - { 7, 1, 0, 1, 161, 54, }, - { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, 28, }, + { 9, 1, 0, 1, 161, 76, }, { 0, 1, 0, 1, 165, 76, }, { 2, 1, 0, 1, 165, 28, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, { 5, 1, 0, 1, 165, 76, }, - { 6, 1, 0, 1, 165, 76, }, - { 7, 1, 0, 1, 165, 54, }, - { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, 28, }, + { 9, 1, 0, 1, 165, 76, }, { 0, 1, 0, 2, 36, 70, }, { 2, 1, 0, 2, 36, 58, }, { 1, 1, 0, 2, 36, 64, }, { 3, 1, 0, 2, 36, 62, }, { 4, 1, 0, 2, 36, 76, }, { 5, 1, 0, 2, 36, 58, }, - { 6, 1, 0, 2, 36, 64, }, - { 7, 1, 0, 2, 36, 54, }, - { 8, 1, 0, 2, 36, 62, }, - { 9, 1, 0, 2, 36, 62, }, + { 9, 1, 0, 2, 36, 60, }, { 0, 1, 0, 2, 40, 76, }, { 2, 1, 0, 2, 40, 58, }, { 1, 1, 0, 2, 40, 62, }, { 3, 1, 0, 2, 40, 62, }, { 4, 1, 0, 2, 40, 76, }, { 5, 1, 0, 2, 40, 58, }, - { 6, 1, 0, 2, 40, 64, }, - { 7, 1, 0, 2, 40, 54, }, - { 8, 1, 0, 2, 40, 62, }, - { 9, 1, 0, 2, 40, 62, }, + { 9, 1, 0, 2, 40, 60, }, { 0, 1, 0, 2, 44, 76, }, { 2, 1, 0, 2, 44, 58, }, { 1, 1, 0, 2, 44, 62, }, { 3, 1, 0, 2, 44, 62, }, { 4, 1, 0, 2, 44, 76, }, { 5, 1, 0, 2, 44, 58, }, - { 6, 1, 0, 2, 44, 64, }, - { 7, 1, 0, 2, 44, 54, }, - { 8, 1, 0, 2, 44, 62, }, - { 9, 1, 0, 2, 44, 62, }, + { 9, 1, 0, 2, 44, 60, }, { 0, 1, 0, 2, 48, 76, }, { 2, 1, 0, 2, 48, 58, }, { 1, 1, 0, 2, 48, 62, }, { 3, 1, 0, 2, 48, 62, }, { 4, 1, 0, 2, 48, 58, }, { 5, 1, 0, 2, 48, 58, }, - { 6, 1, 0, 2, 48, 64, }, - { 7, 1, 0, 2, 48, 54, }, - { 8, 1, 0, 2, 48, 62, }, - { 9, 1, 0, 2, 48, 62, }, + { 9, 1, 0, 2, 48, 60, }, { 0, 1, 0, 2, 52, 76, }, { 2, 1, 0, 2, 52, 58, }, { 1, 1, 0, 2, 52, 62, }, { 3, 1, 0, 2, 52, 64, }, { 4, 1, 0, 2, 52, 76, }, { 5, 1, 0, 2, 52, 58, }, - { 6, 1, 0, 2, 52, 76, }, - { 7, 1, 0, 2, 52, 54, }, - { 8, 1, 0, 2, 52, 76, }, - { 9, 1, 0, 2, 52, 62, }, + { 9, 1, 0, 2, 52, 60, }, { 0, 1, 0, 2, 56, 76, }, { 2, 1, 0, 2, 56, 58, }, { 1, 1, 0, 2, 56, 62, }, { 3, 1, 0, 2, 56, 64, }, { 4, 1, 0, 2, 56, 76, }, { 5, 1, 0, 2, 56, 58, }, - { 6, 1, 0, 2, 56, 76, }, - { 7, 1, 0, 2, 56, 54, }, - { 8, 1, 0, 2, 56, 76, }, - { 9, 1, 0, 2, 56, 62, }, + 
{ 9, 1, 0, 2, 56, 60, }, { 0, 1, 0, 2, 60, 76, }, { 2, 1, 0, 2, 60, 58, }, { 1, 1, 0, 2, 60, 62, }, { 3, 1, 0, 2, 60, 64, }, { 4, 1, 0, 2, 60, 76, }, { 5, 1, 0, 2, 60, 58, }, - { 6, 1, 0, 2, 60, 76, }, - { 7, 1, 0, 2, 60, 54, }, - { 8, 1, 0, 2, 60, 76, }, - { 9, 1, 0, 2, 60, 62, }, + { 9, 1, 0, 2, 60, 60, }, { 0, 1, 0, 2, 64, 70, }, { 2, 1, 0, 2, 64, 58, }, { 1, 1, 0, 2, 64, 62, }, { 3, 1, 0, 2, 64, 64, }, { 4, 1, 0, 2, 64, 74, }, { 5, 1, 0, 2, 64, 58, }, - { 6, 1, 0, 2, 64, 74, }, - { 7, 1, 0, 2, 64, 54, }, - { 8, 1, 0, 2, 64, 74, }, - { 9, 1, 0, 2, 64, 62, }, + { 9, 1, 0, 2, 64, 60, }, { 0, 1, 0, 2, 100, 66, }, { 2, 1, 0, 2, 100, 58, }, { 1, 1, 0, 2, 100, 76, }, { 3, 1, 0, 2, 100, 66, }, { 4, 1, 0, 2, 100, 76, }, { 5, 1, 0, 2, 100, 58, }, - { 6, 1, 0, 2, 100, 70, }, - { 7, 1, 0, 2, 100, 54, }, - { 8, 1, 0, 2, 100, 70, }, { 9, 1, 0, 2, 100, 127, }, { 0, 1, 0, 2, 104, 76, }, { 2, 1, 0, 2, 104, 58, }, @@ -42963,9 +42999,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 104, 76, }, { 4, 1, 0, 2, 104, 76, }, { 5, 1, 0, 2, 104, 58, }, - { 6, 1, 0, 2, 104, 76, }, - { 7, 1, 0, 2, 104, 54, }, - { 8, 1, 0, 2, 104, 76, }, { 9, 1, 0, 2, 104, 127, }, { 0, 1, 0, 2, 108, 76, }, { 2, 1, 0, 2, 108, 58, }, @@ -42973,9 +43006,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 108, 76, }, { 4, 1, 0, 2, 108, 76, }, { 5, 1, 0, 2, 108, 58, }, - { 6, 1, 0, 2, 108, 76, }, - { 7, 1, 0, 2, 108, 54, }, - { 8, 1, 0, 2, 108, 76, }, { 9, 1, 0, 2, 108, 127, }, { 0, 1, 0, 2, 112, 76, }, { 2, 1, 0, 2, 112, 58, }, @@ -42983,9 +43013,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 112, 76, }, { 4, 1, 0, 2, 112, 76, }, { 5, 1, 0, 2, 112, 58, }, - { 6, 1, 0, 2, 112, 76, }, - { 7, 1, 0, 2, 112, 54, }, - { 8, 1, 0, 2, 112, 76, }, { 9, 1, 0, 2, 112, 127, }, { 0, 1, 0, 2, 116, 76, }, { 2, 1, 0, 2, 116, 58, }, @@ -42993,9 +43020,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 116, 76, }, { 4, 1, 0, 2, 116, 76, }, { 5, 1, 0, 2, 116, 58, }, - { 6, 1, 0, 2, 116, 76, }, - { 7, 1, 0, 2, 116, 54, }, - { 8, 1, 0, 2, 116, 76, }, { 9, 1, 0, 2, 116, 127, }, { 0, 1, 0, 2, 120, 76, }, { 2, 1, 0, 2, 120, 58, }, @@ -43003,9 +43027,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 120, 127, }, { 4, 1, 0, 2, 120, 76, }, { 5, 1, 0, 2, 120, 127, }, - { 6, 1, 0, 2, 120, 76, }, - { 7, 1, 0, 2, 120, 54, }, - { 8, 1, 0, 2, 120, 76, }, { 9, 1, 0, 2, 120, 127, }, { 0, 1, 0, 2, 124, 76, }, { 2, 1, 0, 2, 124, 58, }, @@ -43013,9 +43034,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 124, 127, }, { 4, 1, 0, 2, 124, 76, }, { 5, 1, 0, 2, 124, 127, }, - { 6, 1, 0, 2, 124, 76, }, - { 7, 1, 0, 2, 124, 54, }, - { 8, 1, 0, 2, 124, 76, }, { 9, 1, 0, 2, 124, 127, }, { 0, 1, 0, 2, 128, 76, }, { 2, 1, 0, 2, 128, 58, }, @@ -43023,9 +43041,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 128, 127, }, { 4, 1, 0, 2, 128, 76, }, { 5, 1, 0, 2, 128, 127, }, - { 6, 1, 0, 2, 128, 76, }, - { 7, 1, 0, 2, 128, 54, }, - { 8, 1, 0, 2, 128, 76, }, { 9, 1, 0, 2, 128, 127, }, { 0, 1, 0, 2, 132, 76, }, { 2, 1, 0, 2, 132, 58, }, @@ -43033,9 +43048,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 132, 76, }, { 4, 1, 0, 2, 132, 76, }, { 5, 1, 0, 2, 132, 58, }, - { 6, 1, 0, 2, 132, 76, }, - { 7, 1, 0, 2, 132, 54, }, - { 8, 1, 0, 2, 132, 76, }, { 
9, 1, 0, 2, 132, 127, }, { 0, 1, 0, 2, 136, 76, }, { 2, 1, 0, 2, 136, 58, }, @@ -43043,9 +43055,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 136, 76, }, { 4, 1, 0, 2, 136, 76, }, { 5, 1, 0, 2, 136, 58, }, - { 6, 1, 0, 2, 136, 76, }, - { 7, 1, 0, 2, 136, 54, }, - { 8, 1, 0, 2, 136, 76, }, { 9, 1, 0, 2, 136, 127, }, { 0, 1, 0, 2, 140, 66, }, { 2, 1, 0, 2, 140, 58, }, @@ -43053,9 +43062,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 140, 66, }, { 4, 1, 0, 2, 140, 76, }, { 5, 1, 0, 2, 140, 58, }, - { 6, 1, 0, 2, 140, 70, }, - { 7, 1, 0, 2, 140, 54, }, - { 8, 1, 0, 2, 140, 70, }, { 9, 1, 0, 2, 140, 127, }, { 0, 1, 0, 2, 144, 76, }, { 2, 1, 0, 2, 144, 127, }, @@ -43063,9 +43069,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 144, 76, }, { 4, 1, 0, 2, 144, 76, }, { 5, 1, 0, 2, 144, 127, }, - { 6, 1, 0, 2, 144, 76, }, - { 7, 1, 0, 2, 144, 127, }, - { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, { 2, 1, 0, 2, 149, 28, }, @@ -43073,139 +43076,97 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, { 5, 1, 0, 2, 149, 76, }, - { 6, 1, 0, 2, 149, 76, }, - { 7, 1, 0, 2, 149, 54, }, - { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, 28, }, + { 9, 1, 0, 2, 149, 76, }, { 0, 1, 0, 2, 153, 76, }, { 2, 1, 0, 2, 153, 28, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, { 5, 1, 0, 2, 153, 76, }, - { 6, 1, 0, 2, 153, 76, }, - { 7, 1, 0, 2, 153, 54, }, - { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, 28, }, + { 9, 1, 0, 2, 153, 76, }, { 0, 1, 0, 2, 157, 76, }, { 2, 1, 0, 2, 157, 28, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, { 5, 1, 0, 2, 157, 76, }, - { 6, 1, 0, 2, 157, 76, }, - { 7, 1, 0, 2, 157, 54, }, - { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, 28, }, + { 9, 1, 0, 2, 157, 76, }, { 0, 1, 0, 2, 161, 76, }, { 2, 1, 0, 2, 161, 28, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, { 5, 1, 0, 2, 161, 76, }, - { 6, 1, 0, 2, 161, 76, }, - { 7, 1, 0, 2, 161, 54, }, - { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, 28, }, + { 9, 1, 0, 2, 161, 76, }, { 0, 1, 0, 2, 165, 76, }, { 2, 1, 0, 2, 165, 28, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, { 5, 1, 0, 2, 165, 76, }, - { 6, 1, 0, 2, 165, 76, }, - { 7, 1, 0, 2, 165, 54, }, - { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, 28, }, + { 9, 1, 0, 2, 165, 76, }, { 0, 1, 0, 3, 36, 64, }, { 2, 1, 0, 3, 36, 36, }, { 1, 1, 0, 3, 36, 50, }, { 3, 1, 0, 3, 36, 38, }, { 4, 1, 0, 3, 36, 66, }, { 5, 1, 0, 3, 36, 36, }, - { 6, 1, 0, 3, 36, 52, }, - { 7, 1, 0, 3, 36, 30, }, - { 8, 1, 0, 3, 36, 50, }, - { 9, 1, 0, 3, 36, 38, }, + { 9, 1, 0, 3, 36, 36, }, { 0, 1, 0, 3, 40, 68, }, { 2, 1, 0, 3, 40, 36, }, { 1, 1, 0, 3, 40, 50, }, { 3, 1, 0, 3, 40, 38, }, { 4, 1, 0, 3, 40, 66, }, { 5, 1, 0, 3, 40, 36, }, - { 6, 1, 0, 3, 40, 52, }, - { 7, 1, 0, 3, 40, 30, }, - { 8, 1, 0, 3, 40, 50, }, - { 9, 1, 0, 3, 40, 38, }, + { 9, 1, 0, 3, 40, 36, }, { 0, 1, 0, 3, 44, 68, }, { 2, 1, 0, 3, 44, 36, }, { 1, 1, 0, 3, 44, 50, }, { 3, 1, 0, 3, 44, 38, }, { 4, 1, 0, 3, 44, 66, }, { 5, 1, 0, 3, 44, 36, }, - { 6, 1, 0, 3, 44, 52, }, - { 7, 1, 0, 3, 44, 30, }, - { 8, 1, 0, 3, 44, 50, }, - { 9, 1, 0, 3, 44, 38, }, + { 9, 1, 0, 3, 44, 36, }, { 0, 1, 0, 3, 48, 68, }, { 2, 1, 0, 3, 48, 36, }, { 1, 1, 0, 3, 48, 50, }, { 3, 1, 0, 3, 48, 
38, }, { 4, 1, 0, 3, 48, 42, }, { 5, 1, 0, 3, 48, 36, }, - { 6, 1, 0, 3, 48, 52, }, - { 7, 1, 0, 3, 48, 30, }, - { 8, 1, 0, 3, 48, 50, }, - { 9, 1, 0, 3, 48, 38, }, + { 9, 1, 0, 3, 48, 36, }, { 0, 1, 0, 3, 52, 68, }, { 2, 1, 0, 3, 52, 36, }, { 1, 1, 0, 3, 52, 50, }, { 3, 1, 0, 3, 52, 40, }, { 4, 1, 0, 3, 52, 66, }, { 5, 1, 0, 3, 52, 36, }, - { 6, 1, 0, 3, 52, 68, }, - { 7, 1, 0, 3, 52, 30, }, - { 8, 1, 0, 3, 52, 68, }, - { 9, 1, 0, 3, 52, 38, }, + { 9, 1, 0, 3, 52, 36, }, { 0, 1, 0, 3, 56, 68, }, { 2, 1, 0, 3, 56, 36, }, { 1, 1, 0, 3, 56, 50, }, { 3, 1, 0, 3, 56, 40, }, { 4, 1, 0, 3, 56, 66, }, { 5, 1, 0, 3, 56, 36, }, - { 6, 1, 0, 3, 56, 68, }, - { 7, 1, 0, 3, 56, 30, }, - { 8, 1, 0, 3, 56, 68, }, - { 9, 1, 0, 3, 56, 38, }, + { 9, 1, 0, 3, 56, 36, }, { 0, 1, 0, 3, 60, 68, }, { 2, 1, 0, 3, 60, 36, }, { 1, 1, 0, 3, 60, 50, }, { 3, 1, 0, 3, 60, 40, }, { 4, 1, 0, 3, 60, 66, }, { 5, 1, 0, 3, 60, 36, }, - { 6, 1, 0, 3, 60, 66, }, - { 7, 1, 0, 3, 60, 30, }, - { 8, 1, 0, 3, 60, 66, }, - { 9, 1, 0, 3, 60, 38, }, + { 9, 1, 0, 3, 60, 36, }, { 0, 1, 0, 3, 64, 66, }, { 2, 1, 0, 3, 64, 36, }, { 1, 1, 0, 3, 64, 50, }, { 3, 1, 0, 3, 64, 40, }, { 4, 1, 0, 3, 64, 66, }, { 5, 1, 0, 3, 64, 36, }, - { 6, 1, 0, 3, 64, 68, }, - { 7, 1, 0, 3, 64, 30, }, - { 8, 1, 0, 3, 64, 68, }, - { 9, 1, 0, 3, 64, 38, }, + { 9, 1, 0, 3, 64, 36, }, { 0, 1, 0, 3, 100, 64, }, { 2, 1, 0, 3, 100, 36, }, { 1, 1, 0, 3, 100, 70, }, { 3, 1, 0, 3, 100, 64, }, { 4, 1, 0, 3, 100, 66, }, { 5, 1, 0, 3, 100, 36, }, - { 6, 1, 0, 3, 100, 60, }, - { 7, 1, 0, 3, 100, 30, }, - { 8, 1, 0, 3, 100, 60, }, { 9, 1, 0, 3, 100, 127, }, { 0, 1, 0, 3, 104, 68, }, { 2, 1, 0, 3, 104, 36, }, @@ -43213,9 +43174,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 104, 68, }, { 4, 1, 0, 3, 104, 66, }, { 5, 1, 0, 3, 104, 36, }, - { 6, 1, 0, 3, 104, 68, }, - { 7, 1, 0, 3, 104, 30, }, - { 8, 1, 0, 3, 104, 68, }, { 9, 1, 0, 3, 104, 127, }, { 0, 1, 0, 3, 108, 68, }, { 2, 1, 0, 3, 108, 36, }, @@ -43223,9 +43181,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 108, 68, }, { 4, 1, 0, 3, 108, 66, }, { 5, 1, 0, 3, 108, 36, }, - { 6, 1, 0, 3, 108, 68, }, - { 7, 1, 0, 3, 108, 30, }, - { 8, 1, 0, 3, 108, 68, }, { 9, 1, 0, 3, 108, 127, }, { 0, 1, 0, 3, 112, 68, }, { 2, 1, 0, 3, 112, 36, }, @@ -43233,9 +43188,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 112, 68, }, { 4, 1, 0, 3, 112, 66, }, { 5, 1, 0, 3, 112, 36, }, - { 6, 1, 0, 3, 112, 68, }, - { 7, 1, 0, 3, 112, 30, }, - { 8, 1, 0, 3, 112, 68, }, { 9, 1, 0, 3, 112, 127, }, { 0, 1, 0, 3, 116, 68, }, { 2, 1, 0, 3, 116, 36, }, @@ -43243,9 +43195,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 116, 68, }, { 4, 1, 0, 3, 116, 66, }, { 5, 1, 0, 3, 116, 36, }, - { 6, 1, 0, 3, 116, 68, }, - { 7, 1, 0, 3, 116, 30, }, - { 8, 1, 0, 3, 116, 68, }, { 9, 1, 0, 3, 116, 127, }, { 0, 1, 0, 3, 120, 68, }, { 2, 1, 0, 3, 120, 36, }, @@ -43253,9 +43202,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 120, 127, }, { 4, 1, 0, 3, 120, 66, }, { 5, 1, 0, 3, 120, 127, }, - { 6, 1, 0, 3, 120, 68, }, - { 7, 1, 0, 3, 120, 30, }, - { 8, 1, 0, 3, 120, 68, }, { 9, 1, 0, 3, 120, 127, }, { 0, 1, 0, 3, 124, 68, }, { 2, 1, 0, 3, 124, 36, }, @@ -43263,9 +43209,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 124, 127, }, { 4, 1, 0, 3, 124, 66, }, { 5, 1, 0, 3, 124, 127, }, - { 6, 1, 0, 3, 124, 68, }, 
- { 7, 1, 0, 3, 124, 30, }, - { 8, 1, 0, 3, 124, 68, }, { 9, 1, 0, 3, 124, 127, }, { 0, 1, 0, 3, 128, 68, }, { 2, 1, 0, 3, 128, 36, }, @@ -43273,9 +43216,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 128, 127, }, { 4, 1, 0, 3, 128, 66, }, { 5, 1, 0, 3, 128, 127, }, - { 6, 1, 0, 3, 128, 68, }, - { 7, 1, 0, 3, 128, 30, }, - { 8, 1, 0, 3, 128, 68, }, { 9, 1, 0, 3, 128, 127, }, { 0, 1, 0, 3, 132, 68, }, { 2, 1, 0, 3, 132, 36, }, @@ -43283,9 +43223,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 132, 68, }, { 4, 1, 0, 3, 132, 66, }, { 5, 1, 0, 3, 132, 36, }, - { 6, 1, 0, 3, 132, 68, }, - { 7, 1, 0, 3, 132, 30, }, - { 8, 1, 0, 3, 132, 68, }, { 9, 1, 0, 3, 132, 127, }, { 0, 1, 0, 3, 136, 68, }, { 2, 1, 0, 3, 136, 36, }, @@ -43293,9 +43230,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 136, 68, }, { 4, 1, 0, 3, 136, 66, }, { 5, 1, 0, 3, 136, 36, }, - { 6, 1, 0, 3, 136, 68, }, - { 7, 1, 0, 3, 136, 30, }, - { 8, 1, 0, 3, 136, 68, }, { 9, 1, 0, 3, 136, 127, }, { 0, 1, 0, 3, 140, 58, }, { 2, 1, 0, 3, 140, 36, }, @@ -43303,9 +43237,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 140, 58, }, { 4, 1, 0, 3, 140, 66, }, { 5, 1, 0, 3, 140, 36, }, - { 6, 1, 0, 3, 140, 60, }, - { 7, 1, 0, 3, 140, 30, }, - { 8, 1, 0, 3, 140, 60, }, { 9, 1, 0, 3, 140, 127, }, { 0, 1, 0, 3, 144, 68, }, { 2, 1, 0, 3, 144, 127, }, @@ -43313,9 +43244,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 144, 68, }, { 4, 1, 0, 3, 144, 66, }, { 5, 1, 0, 3, 144, 127, }, - { 6, 1, 0, 3, 144, 68, }, - { 7, 1, 0, 3, 144, 127, }, - { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, { 2, 1, 0, 3, 149, 4, }, @@ -43323,59 +43251,41 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 149, 76, }, { 4, 1, 0, 3, 149, 62, }, { 5, 1, 0, 3, 149, 76, }, - { 6, 1, 0, 3, 149, 76, }, - { 7, 1, 0, 3, 149, 30, }, - { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, 4, }, + { 9, 1, 0, 3, 149, 68, }, { 0, 1, 0, 3, 153, 76, }, { 2, 1, 0, 3, 153, 4, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, { 4, 1, 0, 3, 153, 62, }, { 5, 1, 0, 3, 153, 76, }, - { 6, 1, 0, 3, 153, 76, }, - { 7, 1, 0, 3, 153, 30, }, - { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, 4, }, + { 9, 1, 0, 3, 153, 68, }, { 0, 1, 0, 3, 157, 76, }, { 2, 1, 0, 3, 157, 4, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, { 4, 1, 0, 3, 157, 62, }, { 5, 1, 0, 3, 157, 76, }, - { 6, 1, 0, 3, 157, 76, }, - { 7, 1, 0, 3, 157, 30, }, - { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, 4, }, + { 9, 1, 0, 3, 157, 68, }, { 0, 1, 0, 3, 161, 76, }, { 2, 1, 0, 3, 161, 4, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, { 4, 1, 0, 3, 161, 62, }, { 5, 1, 0, 3, 161, 76, }, - { 6, 1, 0, 3, 161, 76, }, - { 7, 1, 0, 3, 161, 30, }, - { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, 4, }, + { 9, 1, 0, 3, 161, 72, }, { 0, 1, 0, 3, 165, 76, }, { 2, 1, 0, 3, 165, 4, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, { 4, 1, 0, 3, 165, 62, }, { 5, 1, 0, 3, 165, 76, }, - { 6, 1, 0, 3, 165, 76, }, - { 7, 1, 0, 3, 165, 30, }, - { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, 4, }, + { 9, 1, 0, 3, 165, 72, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, { 1, 1, 1, 2, 38, 64, }, { 3, 1, 1, 2, 38, 64, }, { 4, 1, 1, 2, 38, 64, }, { 5, 1, 1, 2, 38, 64, }, - { 6, 1, 1, 2, 38, 64, }, - { 7, 1, 1, 2, 38, 54, }, - { 8, 1, 1, 2, 38, 
62, }, { 9, 1, 1, 2, 38, 64, }, { 0, 1, 1, 2, 46, 72, }, { 2, 1, 1, 2, 46, 64, }, @@ -43383,9 +43293,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 46, 64, }, { 4, 1, 1, 2, 46, 70, }, { 5, 1, 1, 2, 46, 64, }, - { 6, 1, 1, 2, 46, 64, }, - { 7, 1, 1, 2, 46, 54, }, - { 8, 1, 1, 2, 46, 62, }, { 9, 1, 1, 2, 46, 64, }, { 0, 1, 1, 2, 54, 72, }, { 2, 1, 1, 2, 54, 64, }, @@ -43393,9 +43300,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 54, 64, }, { 4, 1, 1, 2, 54, 72, }, { 5, 1, 1, 2, 54, 64, }, - { 6, 1, 1, 2, 54, 72, }, - { 7, 1, 1, 2, 54, 54, }, - { 8, 1, 1, 2, 54, 72, }, { 9, 1, 1, 2, 54, 64, }, { 0, 1, 1, 2, 62, 60, }, { 2, 1, 1, 2, 62, 64, }, @@ -43403,9 +43307,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 62, 60, }, { 4, 1, 1, 2, 62, 60, }, { 5, 1, 1, 2, 62, 64, }, - { 6, 1, 1, 2, 62, 64, }, - { 7, 1, 1, 2, 62, 54, }, - { 8, 1, 1, 2, 62, 64, }, { 9, 1, 1, 2, 62, 64, }, { 0, 1, 1, 2, 102, 60, }, { 2, 1, 1, 2, 102, 64, }, @@ -43413,9 +43314,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 102, 60, }, { 4, 1, 1, 2, 102, 64, }, { 5, 1, 1, 2, 102, 64, }, - { 6, 1, 1, 2, 102, 58, }, - { 7, 1, 1, 2, 102, 54, }, - { 8, 1, 1, 2, 102, 58, }, { 9, 1, 1, 2, 102, 127, }, { 0, 1, 1, 2, 110, 72, }, { 2, 1, 1, 2, 110, 64, }, @@ -43423,9 +43321,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 110, 72, }, { 4, 1, 1, 2, 110, 72, }, { 5, 1, 1, 2, 110, 64, }, - { 6, 1, 1, 2, 110, 72, }, - { 7, 1, 1, 2, 110, 54, }, - { 8, 1, 1, 2, 110, 72, }, { 9, 1, 1, 2, 110, 127, }, { 0, 1, 1, 2, 118, 72, }, { 2, 1, 1, 2, 118, 64, }, @@ -43433,9 +43328,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 118, 127, }, { 4, 1, 1, 2, 118, 72, }, { 5, 1, 1, 2, 118, 127, }, - { 6, 1, 1, 2, 118, 72, }, - { 7, 1, 1, 2, 118, 54, }, - { 8, 1, 1, 2, 118, 72, }, { 9, 1, 1, 2, 118, 127, }, { 0, 1, 1, 2, 126, 72, }, { 2, 1, 1, 2, 126, 64, }, @@ -43443,9 +43335,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 126, 127, }, { 4, 1, 1, 2, 126, 72, }, { 5, 1, 1, 2, 126, 127, }, - { 6, 1, 1, 2, 126, 72, }, - { 7, 1, 1, 2, 126, 54, }, - { 8, 1, 1, 2, 126, 72, }, { 9, 1, 1, 2, 126, 127, }, { 0, 1, 1, 2, 134, 72, }, { 2, 1, 1, 2, 134, 64, }, @@ -43453,9 +43342,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 134, 72, }, { 4, 1, 1, 2, 134, 72, }, { 5, 1, 1, 2, 134, 64, }, - { 6, 1, 1, 2, 134, 72, }, - { 7, 1, 1, 2, 134, 54, }, - { 8, 1, 1, 2, 134, 72, }, { 9, 1, 1, 2, 134, 127, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, @@ -43463,9 +43349,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 142, 72, }, { 4, 1, 1, 2, 142, 72, }, { 5, 1, 1, 2, 142, 127, }, - { 6, 1, 1, 2, 142, 72, }, - { 7, 1, 1, 2, 142, 127, }, - { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, { 2, 1, 1, 2, 151, 28, }, @@ -43473,29 +43356,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, { 5, 1, 1, 2, 151, 72, }, - { 6, 1, 1, 2, 151, 72, }, - { 7, 1, 1, 2, 151, 54, }, - { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, 28, }, + { 9, 1, 1, 2, 151, 72, }, { 0, 1, 1, 2, 159, 72, }, { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 
72, }, { 5, 1, 1, 2, 159, 72, }, - { 6, 1, 1, 2, 159, 72, }, - { 7, 1, 1, 2, 159, 54, }, - { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, 28, }, + { 9, 1, 1, 2, 159, 72, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, { 3, 1, 1, 3, 38, 40, }, { 4, 1, 1, 3, 38, 54, }, { 5, 1, 1, 3, 38, 40, }, - { 6, 1, 1, 3, 38, 52, }, - { 7, 1, 1, 3, 38, 30, }, - { 8, 1, 1, 3, 38, 50, }, { 9, 1, 1, 3, 38, 40, }, { 0, 1, 1, 3, 46, 68, }, { 2, 1, 1, 3, 46, 40, }, @@ -43503,9 +43377,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 46, 40, }, { 4, 1, 1, 3, 46, 54, }, { 5, 1, 1, 3, 46, 40, }, - { 6, 1, 1, 3, 46, 52, }, - { 7, 1, 1, 3, 46, 30, }, - { 8, 1, 1, 3, 46, 50, }, { 9, 1, 1, 3, 46, 40, }, { 0, 1, 1, 3, 54, 68, }, { 2, 1, 1, 3, 54, 40, }, @@ -43513,9 +43384,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 54, 40, }, { 4, 1, 1, 3, 54, 66, }, { 5, 1, 1, 3, 54, 40, }, - { 6, 1, 1, 3, 54, 68, }, - { 7, 1, 1, 3, 54, 30, }, - { 8, 1, 1, 3, 54, 68, }, { 9, 1, 1, 3, 54, 40, }, { 0, 1, 1, 3, 62, 58, }, { 2, 1, 1, 3, 62, 40, }, @@ -43523,9 +43391,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 62, 40, }, { 4, 1, 1, 3, 62, 50, }, { 5, 1, 1, 3, 62, 40, }, - { 6, 1, 1, 3, 62, 58, }, - { 7, 1, 1, 3, 62, 30, }, - { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, { 0, 1, 1, 3, 102, 56, }, { 2, 1, 1, 3, 102, 40, }, @@ -43533,9 +43398,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 102, 56, }, { 4, 1, 1, 3, 102, 54, }, { 5, 1, 1, 3, 102, 40, }, - { 6, 1, 1, 3, 102, 54, }, - { 7, 1, 1, 3, 102, 30, }, - { 8, 1, 1, 3, 102, 54, }, { 9, 1, 1, 3, 102, 127, }, { 0, 1, 1, 3, 110, 68, }, { 2, 1, 1, 3, 110, 40, }, @@ -43543,9 +43405,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 110, 68, }, { 4, 1, 1, 3, 110, 66, }, { 5, 1, 1, 3, 110, 40, }, - { 6, 1, 1, 3, 110, 68, }, - { 7, 1, 1, 3, 110, 30, }, - { 8, 1, 1, 3, 110, 68, }, { 9, 1, 1, 3, 110, 127, }, { 0, 1, 1, 3, 118, 68, }, { 2, 1, 1, 3, 118, 40, }, @@ -43553,9 +43412,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 118, 127, }, { 4, 1, 1, 3, 118, 66, }, { 5, 1, 1, 3, 118, 127, }, - { 6, 1, 1, 3, 118, 68, }, - { 7, 1, 1, 3, 118, 30, }, - { 8, 1, 1, 3, 118, 68, }, { 9, 1, 1, 3, 118, 127, }, { 0, 1, 1, 3, 126, 68, }, { 2, 1, 1, 3, 126, 40, }, @@ -43563,9 +43419,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 126, 127, }, { 4, 1, 1, 3, 126, 66, }, { 5, 1, 1, 3, 126, 127, }, - { 6, 1, 1, 3, 126, 68, }, - { 7, 1, 1, 3, 126, 30, }, - { 8, 1, 1, 3, 126, 68, }, { 9, 1, 1, 3, 126, 127, }, { 0, 1, 1, 3, 134, 68, }, { 2, 1, 1, 3, 134, 40, }, @@ -43573,9 +43426,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 134, 68, }, { 4, 1, 1, 3, 134, 66, }, { 5, 1, 1, 3, 134, 40, }, - { 6, 1, 1, 3, 134, 68, }, - { 7, 1, 1, 3, 134, 30, }, - { 8, 1, 1, 3, 134, 68, }, { 9, 1, 1, 3, 134, 127, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, @@ -43583,9 +43433,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 142, 68, }, { 4, 1, 1, 3, 142, 66, }, { 5, 1, 1, 3, 142, 127, }, - { 6, 1, 1, 3, 142, 68, }, - { 7, 1, 1, 3, 142, 127, }, - { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, { 2, 1, 1, 3, 151, 4, }, @@ -43593,29 +43440,20 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, { 5, 1, 1, 3, 151, 72, }, - { 6, 1, 1, 3, 151, 72, }, - { 7, 1, 1, 3, 151, 30, }, - { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, 4, }, + { 9, 1, 1, 3, 151, 64, }, { 0, 1, 1, 3, 159, 72, }, { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, { 5, 1, 1, 3, 159, 72, }, - { 6, 1, 1, 3, 159, 72, }, - { 7, 1, 1, 3, 159, 30, }, - { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, 4, }, + { 9, 1, 1, 3, 159, 72, }, { 0, 1, 2, 4, 42, 68, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, { 3, 1, 2, 4, 42, 64, }, { 4, 1, 2, 4, 42, 60, }, { 5, 1, 2, 4, 42, 64, }, - { 6, 1, 2, 4, 42, 64, }, - { 7, 1, 2, 4, 42, 54, }, - { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, }, { 0, 1, 2, 4, 58, 60, }, { 2, 1, 2, 4, 58, 64, }, @@ -43623,9 +43461,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 58, 60, }, { 4, 1, 2, 4, 58, 56, }, { 5, 1, 2, 4, 58, 64, }, - { 6, 1, 2, 4, 58, 62, }, - { 7, 1, 2, 4, 58, 54, }, - { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, { 0, 1, 2, 4, 106, 60, }, { 2, 1, 2, 4, 106, 64, }, @@ -43633,9 +43468,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 106, 60, }, { 4, 1, 2, 4, 106, 58, }, { 5, 1, 2, 4, 106, 64, }, - { 6, 1, 2, 4, 106, 58, }, - { 7, 1, 2, 4, 106, 54, }, - { 8, 1, 2, 4, 106, 58, }, { 9, 1, 2, 4, 106, 127, }, { 0, 1, 2, 4, 122, 72, }, { 2, 1, 2, 4, 122, 64, }, @@ -43643,9 +43475,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 122, 127, }, { 4, 1, 2, 4, 122, 68, }, { 5, 1, 2, 4, 122, 127, }, - { 6, 1, 2, 4, 122, 72, }, - { 7, 1, 2, 4, 122, 54, }, - { 8, 1, 2, 4, 122, 72, }, { 9, 1, 2, 4, 122, 127, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, @@ -43653,9 +43482,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 138, 72, }, { 4, 1, 2, 4, 138, 70, }, { 5, 1, 2, 4, 138, 127, }, - { 6, 1, 2, 4, 138, 72, }, - { 7, 1, 2, 4, 138, 127, }, - { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, { 2, 1, 2, 4, 155, 28, }, @@ -43663,19 +43489,13 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 62, }, { 5, 1, 2, 4, 155, 72, }, - { 6, 1, 2, 4, 155, 72, }, - { 7, 1, 2, 4, 155, 54, }, - { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, 28, }, + { 9, 1, 2, 4, 155, 72, }, { 0, 1, 2, 5, 42, 56, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, { 3, 1, 2, 5, 42, 40, }, { 4, 1, 2, 5, 42, 50, }, { 5, 1, 2, 5, 42, 40, }, - { 6, 1, 2, 5, 42, 52, }, - { 7, 1, 2, 5, 42, 30, }, - { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, { 0, 1, 2, 5, 58, 54, }, { 2, 1, 2, 5, 58, 40, }, @@ -43683,9 +43503,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 58, 40, }, { 4, 1, 2, 5, 58, 46, }, { 5, 1, 2, 5, 58, 40, }, - { 6, 1, 2, 5, 58, 52, }, - { 7, 1, 2, 5, 58, 30, }, - { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, { 0, 1, 2, 5, 106, 48, }, { 2, 1, 2, 5, 106, 40, }, @@ -43693,9 +43510,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 106, 48, }, { 4, 1, 2, 5, 106, 50, }, { 5, 1, 2, 5, 106, 40, }, - { 6, 1, 2, 5, 106, 50, }, - { 7, 1, 2, 5, 106, 30, }, - { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, { 0, 1, 2, 5, 122, 70, }, { 2, 1, 2, 5, 122, 40, }, @@ -43703,9 +43517,6 @@ static 
const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 122, 127, }, { 4, 1, 2, 5, 122, 62, }, { 5, 1, 2, 5, 122, 127, }, - { 6, 1, 2, 5, 122, 66, }, - { 7, 1, 2, 5, 122, 30, }, - { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, { 0, 1, 2, 5, 138, 70, }, { 2, 1, 2, 5, 138, 127, }, @@ -43713,9 +43524,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 138, 70, }, { 4, 1, 2, 5, 138, 62, }, { 5, 1, 2, 5, 138, 127, }, - { 6, 1, 2, 5, 138, 66, }, - { 7, 1, 2, 5, 138, 127, }, - { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, { 0, 1, 2, 5, 155, 72, }, { 2, 1, 2, 5, 155, 4, }, @@ -43723,10 +43531,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 155, 72, }, { 4, 1, 2, 5, 155, 52, }, { 5, 1, 2, 5, 155, 72, }, - { 6, 1, 2, 5, 155, 62, }, - { 7, 1, 2, 5, 155, 30, }, - { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, 4, }, + { 9, 1, 2, 5, 155, 66, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c index af28ca09d4..157d5102a4 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c @@ -25,7 +25,7 @@ static const struct usb_device_id rtw_8822cu_id_table[] = { }; MODULE_DEVICE_TABLE(usb, rtw_8822cu_id_table); -static int rtw8822bu_probe(struct usb_interface *intf, +static int rtw8822cu_probe(struct usb_interface *intf, const struct usb_device_id *id) { return rtw_usb_probe(intf, id); @@ -34,7 +34,7 @@ static int rtw8822bu_probe(struct usb_interface *intf, static struct usb_driver rtw_8822cu_driver = { .name = "rtw_8822cu", .id_table = rtw_8822cu_id_table, - .probe = rtw8822bu_probe, + .probe = rtw8822cu_probe, .disconnect = rtw_usb_disconnect, }; module_usb_driver(rtw_8822cu_driver); diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index e1bc3606f9..cbf6821af6 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -3,8 +3,10 @@ */ #include "chan.h" +#include "coex.h" #include "debug.h" #include "fw.h" +#include "mac.h" #include "ps.h" #include "util.h" @@ -85,6 +87,19 @@ static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw, return primary_chan_idx; } +static u8 rtw89_get_primary_sb_idx(u8 central_ch, u8 pri_ch, + enum rtw89_bandwidth bw) +{ + static const u8 prisb_cal_ofst[RTW89_CHANNEL_WIDTH_ORDINARY_NUM] = { + 0, 2, 6, 14, 30 + }; + + if (bw >= RTW89_CHANNEL_WIDTH_ORDINARY_NUM) + return 0; + + return (prisb_cal_ofst[bw] + pri_ch - central_ch) / 4; +} + void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, enum rtw89_band band, enum rtw89_bandwidth bandwidth) { @@ -104,6 +119,8 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, chan->subband_type = rtw89_get_subband_type(band, center_chan); chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq, primary_freq); + chan->pri_sb_idx = rtw89_get_primary_sb_idx(center_chan, primary_chan, + bandwidth); } bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, @@ -188,7 +205,9 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + hal->entity_pause = false; bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); + bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES); atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE); 
rtw89_config_default_chandef(rtwdev); } @@ -203,6 +222,8 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) u8 last; u8 idx; + lockdep_assert_held(&rtwdev->mutex); + weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); switch (weight) { default: @@ -237,6 +258,9 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) rtw89_assign_entity_chan(rtwdev, idx, &chan); } + if (hal->entity_pause) + return rtw89_get_entity_mode(rtwdev); + rtw89_set_entity_mode(rtwdev, mode); return mode; } @@ -263,69 +287,1617 @@ static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev, } } -static int rtw89_mcc_start(struct rtw89_dev *rtwdev) +/* This function centrally manages how MCC roles are sorted and iterated. + * And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES. + * So, if data needs to pass an array for ordered_idx, the array can declare + * with NUM_OF_RTW89_MCC_ROLES. Besides, the entire iteration will stop + * immediately as long as iterator returns a non-zero value. + */ +static +int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev, + int (*iterator)(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data), + void *data) { - if (rtwdev->scanning) - rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role * const roles[] = { + &mcc->role_ref, + &mcc->role_aux, + }; + unsigned int idx; + int ret; - rtw89_leave_lps(rtwdev); + BUILD_BUG_ON(ARRAY_SIZE(roles) != NUM_OF_RTW89_MCC_ROLES); + + for (idx = 0; idx < NUM_OF_RTW89_MCC_ROLES; idx++) { + ret = iterator(rtwdev, roles[idx], idx, data); + if (ret) + return ret; + } - rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC start\n"); - rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START); return 0; } -static void rtw89_mcc_stop(struct rtw89_dev *rtwdev) +/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure + * correctness of MCC calculation logic below. We have noticed that once driver + * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY + * will be restricted. We will make an alternative in driver when it is ready + * for MLO. 
+ */ +static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role, u64 tsf) { - rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n"); - rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP); + struct rtw89_vif *rtwvif = role->rtwvif; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval); + u64 sync_tsf = vif->bss_conf.sync_tsf; + u32 remainder; + + if (tsf < sync_tsf) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC get tbtt ofst: tsf might not update yet\n"); + sync_tsf = 0; + } + + div_u64_rem(tsf - sync_tsf, bcn_intvl_us, &remainder); + + return remainder; } -void rtw89_chanctx_work(struct work_struct *work) +static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev) { - struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, - chanctx_work.work); - enum rtw89_entity_mode mode; + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mac_mcc_tsf_rpt rpt = {}; + struct rtw89_fw_mcc_tsf_req req = {}; + u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval); + u32 tbtt_ofst_ref, tbtt_ofst_aux; + u64 tsf_ref, tsf_aux; int ret; - mutex_lock(&rtwdev->mutex); + req.group = mcc->group; + req.macid_x = ref->rtwvif->mac_id; + req.macid_y = aux->rtwvif->mac_id; + ret = rtw89_fw_h2c_mcc_req_tsf(rtwdev, &req, &rpt); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to request tsf: %d\n", ret); + return RTW89_MCC_DFLT_BCN_OFST_TIME; + } - mode = rtw89_get_entity_mode(rtwdev); - switch (mode) { - case RTW89_ENTITY_MODE_MCC_PREPARE: - rtw89_set_entity_mode(rtwdev, RTW89_ENTITY_MODE_MCC); - rtw89_set_channel(rtwdev); + tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low; + tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low; + tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref); + tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux); - ret = rtw89_mcc_start(rtwdev); - if (ret) - rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret); - break; - default: - break; + while (tbtt_ofst_ref < tbtt_ofst_aux) + tbtt_ofst_ref += bcn_intvl_ref_us; + + return (tbtt_ofst_ref - tbtt_ofst_aux) / 1024; +} + +static +void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role, + unsigned int bit) +{ + unsigned int idx = bit / 8; + unsigned int pos = bit % 8; + + if (idx >= ARRAY_SIZE(mcc_role->macid_bitmap)) + return; + + mcc_role->macid_bitmap[idx] |= BIT(pos); +} + +static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_mcc_role *mcc_role = data; + struct rtw89_vif *target = mcc_role->rtwvif; + + if (rtwvif != target) + return; + + rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta->mac_id); +} + +static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct rtw89_vif *rtwvif = mcc_role->rtwvif; + + rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwvif->mac_id); + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_mcc_role_macid_sta_iter, + mcc_role); +} + +static void rtw89_mcc_fill_role_policy(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct rtw89_mcc_policy *policy = &mcc_role->policy; + + policy->c2h_rpt = RTW89_FW_MCC_C2H_RPT_ALL; + policy->tx_null_early = RTW89_MCC_DFLT_TX_NULL_EARLY; + policy->in_curr_ch = false; 
+ policy->dis_sw_retry = true; + policy->sw_retry_count = false; + + if (mcc_role->is_go) + policy->dis_tx_null = true; + else + policy->dis_tx_null = false; +} + +static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(mcc_role->rtwvif); + struct ieee80211_p2p_noa_desc *noa_desc; + u32 bcn_intvl_us = ieee80211_tu_to_usec(mcc_role->beacon_interval); + u32 max_toa_us, max_tob_us, max_dur_us; + u32 start_time, interval, duration; + u64 tsf, tsf_lmt; + int ret; + int i; + + if (!mcc_role->is_go && !mcc_role->is_gc) + return; + + /* find the first periodic NoA */ + for (i = 0; i < RTW89_P2P_MAX_NOA_NUM; i++) { + noa_desc = &vif->bss_conf.p2p_noa_attr.desc[i]; + if (noa_desc->count == 255) + goto fill; } - mutex_unlock(&rtwdev->mutex); + return; + +fill: + start_time = le32_to_cpu(noa_desc->start_time); + interval = le32_to_cpu(noa_desc->interval); + duration = le32_to_cpu(noa_desc->duration); + + if (interval != bcn_intvl_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: mismatch interval: %d vs. %d\n", + interval, bcn_intvl_us); + return; + } + + ret = rtw89_mac_port_get_tsf(rtwdev, mcc_role->rtwvif, &tsf); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return; + } + + tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time; + max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt); + max_dur_us = interval - duration; + max_tob_us = max_dur_us - max_toa_us; + + if (!max_toa_us || !max_tob_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: hit boundary\n"); + return; + } + + if (max_dur_us < max_toa_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: insufficient duration\n"); + return; + } + + mcc_role->limit.max_toa = max_toa_us / 1024; + mcc_role->limit.max_tob = max_tob_us / 1024; + mcc_role->limit.max_dur = max_dur_us / 1024; + mcc_role->limit.enable = true; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: max_toa %d, max_tob %d, max_dur %d\n", + mcc_role->limit.max_toa, mcc_role->limit.max_tob, + mcc_role->limit.max_dur); } -void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev) +static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_mcc_role *role) { - enum rtw89_entity_mode mode; - u32 delay; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + const struct rtw89_chan *chan; - mode = rtw89_get_entity_mode(rtwdev); - switch (mode) { - default: + memset(role, 0, sizeof(*role)); + role->rtwvif = rtwvif; + role->beacon_interval = vif->bss_conf.beacon_int; + + if (!role->beacon_interval) { + rtw89_warn(rtwdev, + "cannot handle MCC role without beacon interval\n"); + return -EINVAL; + } + + role->duration = role->beacon_interval / 2; + + chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + role->is_2ghz = chan->band_type == RTW89_BAND_2G; + role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO; + role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; + + rtw89_mcc_fill_role_macid_bitmap(rtwdev, role); + rtw89_mcc_fill_role_policy(rtwdev, role); + rtw89_mcc_fill_role_limit(rtwdev, role); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role: bcn_intvl %d, is_2ghz %d, is_go %d, is_gc %d\n", + role->beacon_interval, role->is_2ghz, role->is_go, role->is_gc); + return 0; +} + +static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + + memset(bt_role, 0, 
sizeof(*bt_role)); + bt_role->duration = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC bt role: dur %d\n", + bt_role->duration); +} + +struct rtw89_mcc_fill_role_selector { + struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY]; +}; + +static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES); + +static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_fill_role_selector *sel = data; + struct rtw89_vif *role_vif = sel->bind_vif[ordered_idx]; + int ret; + + if (!role_vif) { + rtw89_warn(rtwdev, "cannot handle MCC without role[%d]\n", + ordered_idx); + return -EINVAL; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC fill role[%d] with vif <%d>\n", + ordered_idx, role_vif->mac_id); + + ret = rtw89_mcc_fill_role(rtwdev, role_vif, mcc_role); + if (ret) + return ret; + + return 0; +} + +static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_fill_role_selector sel = {}; + struct rtw89_vif *rtwvif; + int ret; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + if (sel.bind_vif[rtwvif->sub_entity_idx]) { + rtw89_warn(rtwdev, + "MCC skip extra vif <%d> on chanctx[%d]\n", + rtwvif->mac_id, rtwvif->sub_entity_idx); + continue; + } + + sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif; + } + + ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel); + if (ret) + return ret; + + rtw89_mcc_fill_bt_role(rtwdev); + return 0; +} + +static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev, + const struct rtw89_mcc_pattern *new) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n", + new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux); + + *pattern = *new; + memset(&pattern->courtesy, 0, sizeof(pattern->courtesy)); + + if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) { + pattern->courtesy.macid_tgt = aux->rtwvif->mac_id; + pattern->courtesy.macid_src = ref->rtwvif->mac_id; + pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; + pattern->courtesy.enable = true; + } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) { + pattern->courtesy.macid_tgt = ref->rtwvif->mac_id; + pattern->courtesy.macid_src = aux->rtwvif->mac_id; + pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; + pattern->courtesy.enable = true; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC pattern flags: plan %d, courtesy_en %d\n", + pattern->plan, pattern->courtesy.enable); + + if (!pattern->courtesy.enable) return; - case RTW89_ENTITY_MODE_MCC_PREPARE: - delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE); - break; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC pattern courtesy: tgt %d, src %d, slot %d\n", + pattern->courtesy.macid_tgt, pattern->courtesy.macid_src, + pattern->courtesy.slot_num); +} + +/* The follow-up roughly shows the relationship between the parameters + * for pattern calculation. + * + * |< duration ref >| (if mid bt) |< duration aux >| + * |< tob ref >|< toa ref >| ... |< tob aux >|< toa aux >| + * V V + * tbtt ref tbtt aux + * |< beacon offset >| + * + * In loose pattern calculation, we only ensure at least tob_ref and + * toa_ref have positive results. 
If tob_aux or toa_aux is negative + * unfortunately, FW will be notified to handle it with courtesy + * mechanism. + */ +static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn, + bool hdl_bt) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 bcn_ofst = config->beacon_offset; + u16 bt_dur_in_mid = 0; + u16 max_bcn_ofst; + s16 upper, lower; + u16 res; + + *ptrn = (typeof(*ptrn)){ + .plan = hdl_bt ? RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT, + }; + + if (!hdl_bt) + goto calc; + + max_bcn_ofst = ref->duration + aux->duration; + if (ref->limit.enable) + max_bcn_ofst = min_t(u16, max_bcn_ofst, + ref->limit.max_toa + aux->duration); + else if (aux->limit.enable) + max_bcn_ofst = min_t(u16, max_bcn_ofst, + ref->duration + aux->limit.max_tob); + + if (bcn_ofst > max_bcn_ofst && bcn_ofst >= mcc->bt_role.duration) { + bt_dur_in_mid = mcc->bt_role.duration; + ptrn->plan = RTW89_MCC_PLAN_MID_BT; } +calc: rtw89_debug(rtwdev, RTW89_DBG_CHAN, - "queue chanctx work for mode %d with delay %d us\n", - mode, delay); - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work, - usecs_to_jiffies(delay)); + "MCC calc ptrn_ls: plan %d, bcn_ofst %d\n", + ptrn->plan, bcn_ofst); + + res = bcn_ofst - bt_dur_in_mid; + upper = min_t(s16, ref->duration, res); + lower = 0; + + if (ref->limit.enable) { + upper = min_t(s16, upper, ref->limit.max_toa); + lower = max_t(s16, lower, ref->duration - ref->limit.max_tob); + } else if (aux->limit.enable) { + upper = min_t(s16, upper, + res - (aux->duration - aux->limit.max_toa)); + lower = max_t(s16, lower, res - aux->limit.max_tob); + } + + if (lower < upper) + ptrn->toa_ref = (upper + lower) / 2; + else + ptrn->toa_ref = lower; + + ptrn->tob_ref = ref->duration - ptrn->toa_ref; + ptrn->tob_aux = res - ptrn->toa_ref; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; +} + +/* In strict pattern calculation, we consider timing that might need + * for HW stuffs, i.e. min_tob and min_toa. 
+ */ +static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 min_tob = RTW89_MCC_EARLY_RX_BCN_TIME; + u16 min_toa = RTW89_MCC_MIN_RX_BCN_TIME; + u16 bcn_ofst = config->beacon_offset; + s16 upper_toa_ref, lower_toa_ref; + s16 upper_tob_aux, lower_tob_aux; + u16 bt_dur_in_mid; + s16 res; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: plan %d, bcn_ofst %d\n", + ptrn->plan, bcn_ofst); + + if (ptrn->plan == RTW89_MCC_PLAN_MID_BT) + bt_dur_in_mid = mcc->bt_role.duration; + else + bt_dur_in_mid = 0; + + if (ref->duration < min_tob + min_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet ref dur cond\n"); + return -EINVAL; + } + + if (aux->duration < min_tob + min_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet aux dur cond\n"); + return -EINVAL; + } + + res = bcn_ofst - min_toa - min_tob - bt_dur_in_mid; + if (res < 0) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet bcn_ofst cond\n"); + return -EINVAL; + } + + upper_toa_ref = min_t(s16, min_toa + res, ref->duration - min_tob); + lower_toa_ref = min_toa; + upper_tob_aux = min_t(s16, min_tob + res, aux->duration - min_toa); + lower_tob_aux = min_tob; + + if (ref->limit.enable) { + if (min_tob > ref->limit.max_tob || min_toa > ref->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict ref limit\n"); + return -EINVAL; + } + + upper_toa_ref = min_t(s16, upper_toa_ref, ref->limit.max_toa); + lower_toa_ref = max_t(s16, lower_toa_ref, + ref->duration - ref->limit.max_tob); + } else if (aux->limit.enable) { + if (min_tob > aux->limit.max_tob || min_toa > aux->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict aux limit\n"); + return -EINVAL; + } + + upper_tob_aux = min_t(s16, upper_tob_aux, aux->limit.max_tob); + lower_tob_aux = max_t(s16, lower_tob_aux, + aux->duration - aux->limit.max_toa); + } + + upper_toa_ref = min_t(s16, upper_toa_ref, + bcn_ofst - bt_dur_in_mid - lower_tob_aux); + lower_toa_ref = max_t(s16, lower_toa_ref, + bcn_ofst - bt_dur_in_mid - upper_tob_aux); + if (lower_toa_ref > upper_toa_ref) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict boundary\n"); + return -EINVAL; + } + + ptrn->toa_ref = (upper_toa_ref + lower_toa_ref) / 2; + ptrn->tob_ref = ref->duration - ptrn->toa_ref; + ptrn->tob_aux = bcn_ofst - ptrn->toa_ref - bt_dur_in_mid; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; + return 0; +} + +static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + bool sel_plan[NUM_OF_RTW89_MCC_PLAN] = {}; + struct rtw89_mcc_pattern ptrn; + int ret; + int i; + + if (ref->limit.enable && aux->limit.enable) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not support dual limited roles\n"); + return -EINVAL; + } + + if (ref->limit.enable && + ref->duration > ref->limit.max_tob + ref->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not fit ref limit\n"); + return -EINVAL; + } + + if (aux->limit.enable && + aux->duration > aux->limit.max_tob + aux->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not fit aux limit\n"); + 
return -EINVAL; + } + + if (hdl_bt) { + sel_plan[RTW89_MCC_PLAN_TAIL_BT] = true; + sel_plan[RTW89_MCC_PLAN_MID_BT] = true; + } else { + sel_plan[RTW89_MCC_PLAN_NO_BT] = true; + } + + for (i = 0; i < NUM_OF_RTW89_MCC_PLAN; i++) { + if (!sel_plan[i]) + continue; + + ptrn = (typeof(ptrn)){ + .plan = i, + }; + + ret = __rtw89_mcc_calc_pattern_strict(rtwdev, &ptrn); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st with plan %d: fail\n", i); + else + goto done; + } + + __rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt); + +done: + rtw89_mcc_assign_pattern(rtwdev, &ptrn); + return 0; +} + +static void rtw89_mcc_set_default_pattern(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_pattern tmp = {}; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC use default pattern unexpectedly\n"); + + tmp.plan = RTW89_MCC_PLAN_NO_BT; + tmp.tob_ref = ref->duration / 2; + tmp.toa_ref = ref->duration - tmp.tob_ref; + tmp.tob_aux = aux->duration / 2; + tmp.toa_aux = aux->duration - tmp.tob_aux; + + rtw89_mcc_assign_pattern(rtwdev, &tmp); +} + +static void rtw89_mcc_set_duration_go_sta(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role_go, + struct rtw89_mcc_role *role_sta) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 mcc_intvl = config->mcc_interval; + u16 dur_go, dur_sta; + + dur_go = clamp_t(u16, role_go->duration, RTW89_MCC_MIN_GO_DURATION, + mcc_intvl - RTW89_MCC_MIN_STA_DURATION); + if (role_go->limit.enable) + dur_go = min(dur_go, role_go->limit.max_dur); + dur_sta = mcc_intvl - dur_go; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC set dur: (go, sta) {%d, %d} -> {%d, %d}\n", + role_go->duration, role_sta->duration, dur_go, dur_sta); + + role_go->duration = dur_go; + role_sta->duration = dur_sta; +} + +static void rtw89_mcc_set_duration_gc_sta(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 mcc_intvl = config->mcc_interval; + u16 dur_ref, dur_aux; + + if (ref->duration < RTW89_MCC_MIN_STA_DURATION) { + dur_ref = RTW89_MCC_MIN_STA_DURATION; + dur_aux = mcc_intvl - dur_ref; + } else if (aux->duration < RTW89_MCC_MIN_STA_DURATION) { + dur_aux = RTW89_MCC_MIN_STA_DURATION; + dur_ref = mcc_intvl - dur_aux; + } else { + dur_ref = ref->duration; + dur_aux = mcc_intvl - dur_ref; + } + + if (ref->limit.enable) { + dur_ref = min(dur_ref, ref->limit.max_dur); + dur_aux = mcc_intvl - dur_ref; + } else if (aux->limit.enable) { + dur_aux = min(dur_aux, aux->limit.max_dur); + dur_ref = mcc_intvl - dur_aux; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC set dur: (ref, aux) {%d ~ %d} -> {%d ~ %d}\n", + ref->duration, aux->duration, dur_ref, dur_aux); + + ref->duration = dur_ref; + aux->duration = dur_aux; +} + +struct rtw89_mcc_mod_dur_data { + u16 available; + struct { + u16 dur; + u16 room; + } parm[NUM_OF_RTW89_MCC_ROLES]; +}; + +static int rtw89_mcc_mod_dur_get_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_mod_dur_data *p = data; + u16 min; + + p->parm[ordered_idx].dur = mcc_role->duration; + + if (mcc_role->is_go) + min = RTW89_MCC_MIN_GO_DURATION; + else + min = RTW89_MCC_MIN_STA_DURATION; + + p->parm[ordered_idx].room = max_t(s32, 
p->parm[ordered_idx].dur - min, 0); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: chk role[%u]: dur %u, min %u, room %u\n", + ordered_idx, p->parm[ordered_idx].dur, min, + p->parm[ordered_idx].room); + + p->available += p->parm[ordered_idx].room; + return 0; +} + +static int rtw89_mcc_mod_dur_put_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_mod_dur_data *p = data; + + mcc_role->duration = p->parm[ordered_idx].dur; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: set role[%u]: dur %u\n", + ordered_idx, p->parm[ordered_idx].dur); + return 0; +} + +static void rtw89_mcc_mod_duration_dual_2ghz_with_bt(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_mod_dur_data data = {}; + u16 mcc_intvl = config->mcc_interval; + u16 bt_dur = mcc->bt_role.duration; + u16 wifi_dur; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur (dual 2ghz): mcc_intvl %u, raw bt_dur %u\n", + mcc_intvl, bt_dur); + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_get_iterator, &data); + + bt_dur = clamp_t(u16, bt_dur, 1, data.available / 3); + wifi_dur = mcc_intvl - bt_dur; + + if (data.parm[0].room <= data.parm[1].room) { + data.parm[0].dur -= min_t(u16, bt_dur / 2, data.parm[0].room); + data.parm[1].dur = wifi_dur - data.parm[0].dur; + } else { + data.parm[1].dur -= min_t(u16, bt_dur / 2, data.parm[1].room); + data.parm[0].dur = wifi_dur - data.parm[1].dur; + } + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_put_iterator, &data); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC mod dur: set bt: dur %u\n", bt_dur); + mcc->bt_role.duration = bt_dur; +} + +static +void rtw89_mcc_mod_duration_diff_band_with_bt(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role_2ghz, + struct rtw89_mcc_role *role_non_2ghz) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 dur_2ghz, dur_non_2ghz; + u16 bt_dur, mcc_intvl; + + dur_2ghz = role_2ghz->duration; + dur_non_2ghz = role_non_2ghz->duration; + mcc_intvl = config->mcc_interval; + bt_dur = mcc->bt_role.duration; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur (diff band): mcc_intvl %u, bt_dur %u\n", + mcc_intvl, bt_dur); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: check dur_2ghz %u, dur_non_2ghz %u\n", + dur_2ghz, dur_non_2ghz); + + if (dur_non_2ghz >= bt_dur) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: dur_non_2ghz is enough for bt\n"); + return; + } + + dur_non_2ghz = bt_dur; + dur_2ghz = mcc_intvl - dur_non_2ghz; + + if (role_non_2ghz->limit.enable) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: dur_non_2ghz is limited with max %u\n", + role_non_2ghz->limit.max_dur); + + dur_non_2ghz = min(dur_non_2ghz, role_non_2ghz->limit.max_dur); + dur_2ghz = mcc_intvl - dur_non_2ghz; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: set dur_2ghz %u, dur_non_2ghz %u\n", + dur_2ghz, dur_non_2ghz); + + role_2ghz->duration = dur_2ghz; + role_non_2ghz->duration = dur_non_2ghz; +} + +static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + + if (!bt_role->duration) + return false; + + if (ref->is_2ghz && aux->is_2ghz) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC dual roles are on 2GHz; 
consider BT duration\n"); + + rtw89_mcc_mod_duration_dual_2ghz_with_bt(rtwdev); + return true; + } + + if (!ref->is_2ghz && !aux->is_2ghz) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC dual roles are not on 2GHz; ignore BT duration\n"); + return false; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC one role is on 2GHz; modify another for BT duration\n"); + + if (ref->is_2ghz) + rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, ref, aux); + else + rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, aux, ref); + + return false; +} + +static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *tgt, + struct rtw89_mcc_role *src, + bool ref_is_src) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset); + u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval); + u32 cur_tbtt_ofst_src; + u32 tsf_ofst_tgt; + u32 remainder; + u64 tbtt_tgt; + u64 tsf_src; + int ret; + + ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif, &tsf_src); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return; + } + + cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src); + + if (ref_is_src) + tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us; + else + tbtt_tgt = tsf_src - cur_tbtt_ofst_src + + (bcn_intvl_src_us - beacon_offset_us); + + div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder); + tsf_ofst_tgt = bcn_intvl_src_us - remainder; + + config->sync.macid_tgt = tgt->rtwvif->mac_id; + config->sync.macid_src = src->rtwvif->mac_id; + config->sync.offset = tsf_ofst_tgt / 1024; + config->sync.enable = true; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC sync tbtt: tgt %d, src %d, offset %d\n", + config->sync.macid_tgt, config->sync.macid_src, + config->sync.offset); + + rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif, src->rtwvif, + config->sync.offset); +} + +static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_config *config = &mcc->config; + u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval); + u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref); + struct rtw89_vif *rtwvif = ref->rtwvif; + u64 tsf, start_tsf; + u32 cur_tbtt_ofst; + u64 min_time; + int ret; + + ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return ret; + } + + min_time = tsf; + if (ref->is_go) + min_time += ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME); + else + min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME); + + cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf); + start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us; + while (start_tsf < min_time) + start_tsf += bcn_intvl_ref_us; + + config->start_tsf = start_tsf; + return 0; +} + +static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + bool hdl_bt; + int ret; + + memset(config, 0, sizeof(*config)); + + switch (mcc->mode) { + case RTW89_MCC_MODE_GO_STA: + config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME; + if (ref->is_go) { + rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false); + config->mcc_interval = ref->beacon_interval; + 
rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux); + } else { + rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true); + config->mcc_interval = aux->beacon_interval; + rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref); + } + break; + case RTW89_MCC_MODE_GC_STA: + config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev); + config->mcc_interval = ref->beacon_interval; + rtw89_mcc_set_duration_gc_sta(rtwdev); + break; + default: + rtw89_warn(rtwdev, "MCC unknown mode: %d\n", mcc->mode); + return -EFAULT; + } + + hdl_bt = rtw89_mcc_duration_decision_on_bt(rtwdev); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC handle bt: %d\n", hdl_bt); + + ret = rtw89_mcc_calc_pattern(rtwdev, hdl_bt); + if (!ret) + goto bottom; + + rtw89_mcc_set_default_pattern(rtwdev); + +bottom: + return rtw89_mcc_fill_start_tsf(rtwdev); +} + +static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy; + struct rtw89_mcc_policy *policy = &role->policy; + struct rtw89_fw_mcc_add_req req = {}; + const struct rtw89_chan *chan; + int ret; + + chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx); + req.central_ch_seg0 = chan->channel; + req.primary_ch = chan->primary_channel; + req.bandwidth = chan->band_width; + req.ch_band_type = chan->band_type; + + req.macid = role->rtwvif->mac_id; + req.group = mcc->group; + req.c2h_rpt = policy->c2h_rpt; + req.tx_null_early = policy->tx_null_early; + req.dis_tx_null = policy->dis_tx_null; + req.in_curr_ch = policy->in_curr_ch; + req.sw_retry_count = policy->sw_retry_count; + req.dis_sw_retry = policy->dis_sw_retry; + req.duration = role->duration; + req.btc_in_2g = false; + + if (courtesy->enable && courtesy->macid_src == req.macid) { + req.courtesy_target = courtesy->macid_tgt; + req.courtesy_num = courtesy->slot_num; + req.courtesy_en = true; + } + + ret = rtw89_fw_h2c_add_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to add wifi role: %d\n", ret); + return ret; + } + + ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group, + role->rtwvif->mac_id, + role->macid_bitmap); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to set macid bitmap: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + struct rtw89_fw_mcc_add_req req = {}; + int ret; + + req.group = mcc->group; + req.duration = bt_role->duration; + req.btc_in_2g = true; + + ret = rtw89_fw_h2c_add_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to add bt role: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_sync *sync = &config->sync; + struct rtw89_fw_mcc_start_req req = {}; + int ret; + + if (replace) { + req.old_group = mcc->group; + req.old_group_action = RTW89_FW_MCC_OLD_GROUP_ACT_REPLACE; + mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group); + } + + req.group = mcc->group; + + switch (pattern->plan) { + case 
RTW89_MCC_PLAN_TAIL_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + ret = __mcc_fw_add_bt_role(rtwdev); + if (ret) + return ret; + + req.btc_in_group = true; + break; + case RTW89_MCC_PLAN_MID_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_bt_role(rtwdev); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + + req.btc_in_group = true; + break; + case RTW89_MCC_PLAN_NO_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + + req.btc_in_group = false; + break; + default: + rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan); + return -EFAULT; + } + + if (sync->enable) { + ret = rtw89_fw_h2c_mcc_sync(rtwdev, req.group, sync->macid_src, + sync->macid_tgt, sync->offset); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger sync: %d\n", ret); + return ret; + } + } + + req.macid = ref->rtwvif->mac_id; + req.tsf_high = config->start_tsf >> 32; + req.tsf_low = config->start_tsf; + + ret = rtw89_fw_h2c_start_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger start: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_sync *sync = &config->sync; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_fw_mcc_duration req = { + .group = mcc->group, + .btc_in_group = false, + .start_macid = ref->rtwvif->mac_id, + .macid_x = ref->rtwvif->mac_id, + .macid_y = aux->rtwvif->mac_id, + .duration_x = ref->duration, + .duration_y = aux->duration, + .start_tsf_high = config->start_tsf >> 32, + .start_tsf_low = config->start_tsf, + }; + int ret; + + ret = rtw89_fw_h2c_mcc_set_duration(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to set duration: %d\n", ret); + return ret; + } + + if (!sync->enable || !sync_changed) + return 0; + + ret = rtw89_fw_h2c_mcc_sync(rtwdev, mcc->group, sync->macid_src, + sync->macid_tgt, sync->offset); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger sync: %d\n", ret); + return ret; + } + + return 0; +} + +static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_sync *sync = &config->sync; + struct ieee80211_p2p_noa_desc noa_desc = {}; + u64 start_time = config->start_tsf; + u32 interval = config->mcc_interval; + struct rtw89_vif *rtwvif_go; + u32 duration; + + if (mcc->mode != RTW89_MCC_MODE_GO_STA) + return; + + if (ref->is_go) { + rtwvif_go = ref->rtwvif; + start_time += ieee80211_tu_to_usec(ref->duration); + duration = config->mcc_interval - ref->duration; + } else if (aux->is_go) { + rtwvif_go = aux->rtwvif; + start_time += ieee80211_tu_to_usec(pattern->tob_ref) + + ieee80211_tu_to_usec(config->beacon_offset) + + ieee80211_tu_to_usec(pattern->toa_aux); + duration = config->mcc_interval - aux->duration; + + /* convert time domain from sta(ref) to GO(aux) */ 
+ start_time += ieee80211_tu_to_usec(sync->offset); + } else { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC find no GO: skip updating beacon NoA\n"); + return; + } + + rtw89_p2p_noa_renew(rtwvif_go); + + if (enable) { + noa_desc.start_time = cpu_to_le32(start_time); + noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(interval)); + noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(duration)); + noa_desc.count = 255; + rtw89_p2p_noa_append(rtwvif_go, &noa_desc); + } + + /* without chanctx, we cannot get beacon from mac80211 stack */ + if (!rtwvif_go->chanctx_assigned) + return; + + rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go); +} + +static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + + if (mcc->mode != RTW89_MCC_MODE_GO_STA) + return; + + if (ref->is_go) + rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, true); + else if (aux->is_go) + rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, true); + + rtw89_mcc_handle_beacon_noa(rtwdev, true); +} + +static void rtw89_mcc_stop_beacon_noa(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + + if (mcc->mode != RTW89_MCC_MODE_GO_STA) + return; + + if (ref->is_go) + rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, false); + else if (aux->is_go) + rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, false); + + rtw89_mcc_handle_beacon_noa(rtwdev, false); +} + +static int rtw89_mcc_start(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + int ret; + + if (rtwdev->scanning) + rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); + + rtw89_leave_lps(rtwdev); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC start\n"); + + ret = rtw89_mcc_fill_all_roles(rtwdev); + if (ret) + return ret; + + if (ref->is_go || aux->is_go) + mcc->mode = RTW89_MCC_MODE_GO_STA; + else + mcc->mode = RTW89_MCC_MODE_GC_STA; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC sel mode: %d\n", mcc->mode); + + mcc->group = RTW89_MCC_DFLT_GROUP; + + ret = rtw89_mcc_fill_config(rtwdev); + if (ret) + return ret; + + ret = __mcc_fw_start(rtwdev, false); + if (ret) + return ret; + + rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START); + + rtw89_mcc_start_beacon_noa(rtwdev); + return 0; +} + +static void rtw89_mcc_stop(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n"); + + ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group, + ref->rtwvif->mac_id, true); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger stop: %d\n", ret); + + ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to delete group: %d\n", ret); + + rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP); + + rtw89_mcc_stop_beacon_noa(rtwdev); +} + +static int rtw89_mcc_update(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_config old_cfg = *config; + bool sync_changed; + int ret; + + if (rtwdev->scanning) + rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC 
update\n"); + + ret = rtw89_mcc_fill_config(rtwdev); + if (ret) + return ret; + + if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT || + config->pattern.plan != RTW89_MCC_PLAN_NO_BT) { + ret = __mcc_fw_start(rtwdev, true); + if (ret) + return ret; + } else { + if (memcmp(&old_cfg.sync, &config->sync, sizeof(old_cfg.sync)) == 0) + sync_changed = false; + else + sync_changed = true; + + ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed); + if (ret) + return ret; + } + + rtw89_mcc_handle_beacon_noa(rtwdev, true); + return 0; +} + +static void rtw89_mcc_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + s16 tolerance; + u16 bcn_ofst; + u16 diff; + + if (mcc->mode != RTW89_MCC_MODE_GC_STA) + return; + + bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev); + if (bcn_ofst > config->beacon_offset) { + diff = bcn_ofst - config->beacon_offset; + if (pattern->tob_aux < 0) + tolerance = -pattern->tob_aux; + else + tolerance = pattern->toa_aux; + } else { + diff = config->beacon_offset - bcn_ofst; + if (pattern->toa_aux < 0) + tolerance = -pattern->toa_aux; + else + tolerance = pattern->tob_aux; + } + + if (diff <= tolerance) + return; + + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE); +} + +static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role upd = { + .rtwvif = mcc_role->rtwvif, + }; + int ret; + + if (!mcc_role->is_go) + return 0; + + rtw89_mcc_fill_role_macid_bitmap(rtwdev, &upd); + if (memcmp(mcc_role->macid_bitmap, upd.macid_bitmap, + sizeof(mcc_role->macid_bitmap)) == 0) + return 0; + + ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group, + upd.rtwvif->mac_id, + upd.macid_bitmap); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to update macid bitmap: %d\n", ret); + return ret; + } + + memcpy(mcc_role->macid_bitmap, upd.macid_bitmap, + sizeof(mcc_role->macid_bitmap)); + return 0; +} + +static void rtw89_mcc_update_macid_bitmap(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + + if (mcc->mode != RTW89_MCC_MODE_GO_STA) + return; + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_map_iterator, NULL); +} + +static int rtw89_mcc_upd_lmt_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + memset(&mcc_role->limit, 0, sizeof(mcc_role->limit)); + rtw89_mcc_fill_role_limit(rtwdev, mcc_role); + return 0; +} + +static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + + if (mcc->mode != RTW89_MCC_MODE_GC_STA) + return; + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL); +} + +void rtw89_chanctx_work(struct work_struct *work) +{ + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, + chanctx_work.work); + struct rtw89_hal *hal = &rtwdev->hal; + bool update_mcc_pattern = false; + enum rtw89_entity_mode mode; + u32 changed = 0; + int ret; + int i; + + mutex_lock(&rtwdev->mutex); + + if (hal->entity_pause) { + mutex_unlock(&rtwdev->mutex); + return; + } + + for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) { + if (test_and_clear_bit(i, hal->changes)) + changed |= BIT(i); + } + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC_PREPARE: + rtw89_set_entity_mode(rtwdev, 
RTW89_ENTITY_MODE_MCC); + rtw89_set_channel(rtwdev); + + ret = rtw89_mcc_start(rtwdev); + if (ret) + rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret); + break; + case RTW89_ENTITY_MODE_MCC: + if (changed & BIT(RTW89_CHANCTX_BCN_OFFSET_CHANGE) || + changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE) || + changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE) || + changed & BIT(RTW89_CHANCTX_TSF32_TOGGLE_CHANGE)) + update_mcc_pattern = true; + if (changed & BIT(RTW89_CHANCTX_REMOTE_STA_CHANGE)) + rtw89_mcc_update_macid_bitmap(rtwdev); + if (changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE)) + rtw89_mcc_update_limit(rtwdev); + if (changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE)) + rtw89_mcc_fill_bt_role(rtwdev); + if (update_mcc_pattern) { + ret = rtw89_mcc_update(rtwdev); + if (ret) + rtw89_warn(rtwdev, "failed to update MCC: %d\n", + ret); + } + break; + default: + break; + } + + mutex_unlock(&rtwdev->mutex); +} + +void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_changes change) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_entity_mode mode; + u32 delay; + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + default: + return; + case RTW89_ENTITY_MODE_MCC_PREPARE: + delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE); + break; + case RTW89_ENTITY_MODE_MCC: + delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC); + break; + } + + if (change != RTW89_CHANCTX_CHANGE_DFLT) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "set chanctx change %d\n", + change); + set_bit(change, hal->changes); + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "queue chanctx work for mode %d with delay %d us\n", + mode, delay); + ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work, + usecs_to_jiffies(delay)); +} + +void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev) +{ + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_CHANGE_DFLT); +} + +void rtw89_chanctx_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_entity_mode mode; + + lockdep_assert_held(&rtwdev->mutex); + + if (hal->entity_pause) + return; + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC: + rtw89_mcc_track(rtwdev); + break; + default: + break; + } +} + +void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_pause_reasons rsn) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_entity_mode mode; + + lockdep_assert_held(&rtwdev->mutex); + + if (hal->entity_pause) + return; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn); + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC: + rtw89_mcc_stop(rtwdev); + break; + default: + break; + } + + hal->entity_pause = true; +} + +void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_entity_mode mode; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + if (!hal->entity_pause) + return; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n"); + + hal->entity_pause = false; + rtw89_set_channel(rtwdev); + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC: + ret = rtw89_mcc_start(rtwdev); + if (ret) + rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret); + break; + default: + break; + } + + rtw89_queue_chanctx_work(rtwdev); } int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, @@ -415,6 +1987,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev, struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; 
rtwvif->sub_entity_idx = cfg->idx; + rtwvif->chanctx_assigned = true; return 0; } @@ -423,4 +1996,5 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx) { rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0; + rtwvif->chanctx_assigned = false; } diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index 448e6c5df9..9b98d8f4ee 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -9,6 +9,34 @@ /* The dwell time in TU before doing rtw89_chanctx_work(). */ #define RTW89_CHANCTX_TIME_MCC_PREPARE 100 +#define RTW89_CHANCTX_TIME_MCC 100 + +/* various MCC setting time in TU */ +#define RTW89_MCC_LONG_TRIGGER_TIME 300 +#define RTW89_MCC_SHORT_TRIGGER_TIME 100 +#define RTW89_MCC_EARLY_TX_BCN_TIME 10 +#define RTW89_MCC_EARLY_RX_BCN_TIME 5 +#define RTW89_MCC_MIN_RX_BCN_TIME 10 +#define RTW89_MCC_DFLT_BCN_OFST_TIME 40 + +#define RTW89_MCC_MIN_GO_DURATION \ + (RTW89_MCC_EARLY_TX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME) + +#define RTW89_MCC_MIN_STA_DURATION \ + (RTW89_MCC_EARLY_RX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME) + +#define RTW89_MCC_DFLT_GROUP 0 +#define RTW89_MCC_NEXT_GROUP(cur) (((cur) + 1) % 4) + +#define RTW89_MCC_DFLT_TX_NULL_EARLY 3 +#define RTW89_MCC_DFLT_COURTESY_SLOT 3 + +#define NUM_OF_RTW89_MCC_ROLES 2 + +enum rtw89_chanctx_pause_reasons { + RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, + RTW89_CHANCTX_PAUSE_REASON_ROC, +}; static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev) { @@ -55,6 +83,12 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev); enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev); void rtw89_chanctx_work(struct work_struct *work); void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev); +void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_changes change); +void rtw89_chanctx_track(struct rtw89_dev *rtwdev); +void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_pause_reasons rsn); +void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev); int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx); void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 4ba8b3df70..ace7bbf2cf 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -131,7 +131,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1, - .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, + .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .info_buf = 1800, .max_role_num = 6, }, {RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0), @@ -159,7 +159,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1, - .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, + .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .info_buf = 1800, .max_role_num = 6, }, {RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0), @@ -237,13 +237,13 @@ struct rtw89_btc_btf_set_report { struct rtw89_btc_btf_set_slot_table { u8 fver; u8 tbl_num; - u8 buf[]; + struct rtw89_btc_fbtc_slot tbls[] __counted_by(tbl_num); } __packed; struct 
rtw89_btc_btf_set_mon_reg { u8 fver; u8 reg_num; - u8 buf[]; + struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num); } __packed; enum btc_btf_set_cx_policy { @@ -1821,19 +1821,17 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev, static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num, struct rtw89_btc_fbtc_slot *s) { - struct rtw89_btc_btf_set_slot_table *tbl = NULL; - u8 *ptr = NULL; - u16 n = 0; + struct rtw89_btc_btf_set_slot_table *tbl; + u16 n; - n = sizeof(*s) * num + sizeof(*tbl); + n = struct_size(tbl, tbls, num); tbl = kmalloc(n, GFP_KERNEL); if (!tbl) return; tbl->fver = BTF_SET_SLOT_TABLE_VER; tbl->tbl_num = num; - ptr = &tbl->buf[0]; - memcpy(ptr, s, num * sizeof(*s)); + memcpy(tbl->tbls, s, flex_array_size(tbl, tbls, num)); _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n); @@ -1845,7 +1843,7 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_btc_ver *ver = rtwdev->btc.ver; struct rtw89_btc_btf_set_mon_reg *monreg = NULL; - u8 n, *ptr = NULL, ulen, cxmreg_max; + u8 n, ulen, cxmreg_max; u16 sz = 0; n = chip->mon_reg_num; @@ -1866,16 +1864,15 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) return; } - ulen = sizeof(struct rtw89_btc_fbtc_mreg); - sz = (ulen * n) + sizeof(*monreg); + ulen = sizeof(monreg->regs[0]); + sz = struct_size(monreg, regs, n); monreg = kmalloc(sz, GFP_KERNEL); if (!monreg) return; monreg->fver = ver->fcxmreg; monreg->reg_num = n; - ptr = &monreg->buf[0]; - memcpy(ptr, chip->mon_reg, n * ulen); + memcpy(monreg->regs, chip->mon_reg, flex_array_size(monreg, regs, n)); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): sz=%d ulen=%d n=%d\n", __func__, sz, ulen, n); @@ -3840,7 +3837,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) if (mode == BTC_WLINK_25G_MCC) return; - rtw89_ctrl_btg(rtwdev, is_btg); + rtw89_ctrl_btg_bt_rx(rtwdev, is_btg, RTW89_PHY_0); } struct rtw89_txtime_data { diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 133bf289ba..a3624ebf01 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -172,13 +172,31 @@ static const struct ieee80211_iface_limit rtw89_iface_limits[] = { }, }; +static const struct ieee80211_iface_limit rtw89_iface_limits_mcc[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), + }, +}; + static const struct ieee80211_iface_combination rtw89_iface_combs[] = { { .limits = rtw89_iface_limits, .n_limits = ARRAY_SIZE(rtw89_iface_limits), .max_interfaces = 2, .num_different_channels = 1, - } + }, + { + .limits = rtw89_iface_limits_mcc, + .n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc), + .max_interfaces = 2, + .num_different_channels = 2, + }, }; bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate) @@ -1215,6 +1233,136 @@ void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1); +static __le32 rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) | + FIELD_PREP(BE_TXD_BODY0_WDINFO_EN, desc_info->en_wd_info) | + FIELD_PREP(BE_TXD_BODY0_CH_DMA, desc_info->ch_dma) | + FIELD_PREP(BE_TXD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | + FIELD_PREP(BE_TXD_BODY0_WD_PAGE, desc_info->wd_page); + + return cpu_to_le32(dword); +} + +static __le32 
rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | + FIELD_PREP(BE_TXD_BODY1_SEC_KEYID, desc_info->sec_keyid) | + FIELD_PREP(BE_TXD_BODY1_SEC_TYPE, desc_info->sec_type); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND, desc_info->tid_indicate) | + FIELD_PREP(BE_TXD_BODY2_QSEL, desc_info->qsel) | + FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) | + FIELD_PREP(BE_TXD_BODY2_AGG_EN, desc_info->agg_en) | + FIELD_PREP(BE_TXD_BODY2_BK, desc_info->bk) | + FIELD_PREP(BE_TXD_BODY2_MACID, desc_info->mac_id); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) | + FIELD_PREP(BE_TXD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) | + FIELD_PREP(BE_TXD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) | + FIELD_PREP(BE_TXD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) | + FIELD_PREP(BE_TXD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) | + FIELD_PREP(BE_TXD_BODY7_DATA_ER, desc_info->er_cap) | + FIELD_PREP(BE_TXD_BODY7_DATA_BW_ER, 0) | + FIELD_PREP(BE_TXD_BODY7_DATARATE, desc_info->data_rate); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) | + FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_INFO1_MAX_AGG_NUM, desc_info->ampdu_num) | + FIELD_PREP(BE_TXD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) | + FIELD_PREP(BE_TXD_INFO1_DATA_RTY_LOWEST_RATE, + desc_info->data_retry_lowest_rate); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | + FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN, desc_info->sec_en) | + FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) | + FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1); + + return cpu_to_le32(dword); +} + +void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + struct rtw89_txwd_body_v2 *txwd_body = txdesc; + struct rtw89_txwd_info_v2 *txwd_info; + + txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info); + txwd_body->dword1 = rtw89_build_txwd_body1_v2(desc_info); + txwd_body->dword2 = rtw89_build_txwd_body2_v2(desc_info); + txwd_body->dword3 = rtw89_build_txwd_body3_v2(desc_info); + if (desc_info->sec_en) { + txwd_body->dword4 = 
rtw89_build_txwd_body4_v2(desc_info); + txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info); + } + txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info); + + if (!desc_info->en_wd_info) + return; + + txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1); + txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info); + txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info); + txwd_info->dword2 = rtw89_build_txwd_info2_v2(desc_info); + txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info); +} +EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2); + static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | @@ -1235,6 +1383,26 @@ void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1); +static __le32 rtw89_build_txwd_fwcmd0_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | + FIELD_PREP(BE_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ? + RTW89_CORE_RX_TYPE_FWDL : + RTW89_CORE_RX_TYPE_H2C); + + return cpu_to_le32(dword); +} + +void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + struct rtw89_rxdesc_short_v2 *txwd_v2 = (struct rtw89_rxdesc_short_v2 *)txdesc; + + txwd_v2->dword0 = rtw89_build_txwd_fwcmd0_v2(desc_info); +} +EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v2); + static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct rtw89_rx_phy_ppdu *phy_ppdu) @@ -1453,32 +1621,49 @@ static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev, phy_ppdu); } -static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev, - const struct rtw89_rx_desc_info *desc_info, - bool rx_status) +static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev, + u8 desc_info_gi, + bool rx_status, bool eht) { - switch (desc_info->gi_ltf) { + switch (desc_info_gi) { case RTW89_GILTF_SGI_4XHE08: case RTW89_GILTF_2XHE08: case RTW89_GILTF_1XHE08: - return NL80211_RATE_INFO_HE_GI_0_8; + return eht ? NL80211_RATE_INFO_EHT_GI_0_8 : + NL80211_RATE_INFO_HE_GI_0_8; case RTW89_GILTF_2XHE16: case RTW89_GILTF_1XHE16: - return NL80211_RATE_INFO_HE_GI_1_6; + return eht ? NL80211_RATE_INFO_EHT_GI_1_6 : + NL80211_RATE_INFO_HE_GI_1_6; case RTW89_GILTF_LGI_4XHE32: - return NL80211_RATE_INFO_HE_GI_3_2; + return eht ? NL80211_RATE_INFO_EHT_GI_3_2 : + NL80211_RATE_INFO_HE_GI_3_2; default: - rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf); - return rx_status ? NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX; + rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi); + if (rx_status) + return eht ? NL80211_RATE_INFO_EHT_GI_3_2 : + NL80211_RATE_INFO_HE_GI_3_2; + return U8_MAX; } } +static +bool rtw89_check_rx_statu_gi_match(struct ieee80211_rx_status *status, u8 gi_ltf, + bool eht) +{ + if (eht) + return status->eht.gi == gi_ltf; + + return status->he_gi == gi_ltf; +} + static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, struct ieee80211_rx_status *status) { u8 band = desc_info->bb_sel ? 
RTW89_PHY_1 : RTW89_PHY_0; u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf; + bool eht = false; u16 data_rate; bool ret; @@ -1489,19 +1674,20 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, /* rate_idx is still hardware value here */ } else if (data_rate_mode == DATA_RATE_MODE_HT) { rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate); - } else if (data_rate_mode == DATA_RATE_MODE_VHT) { - rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); - } else if (data_rate_mode == DATA_RATE_MODE_HE) { + } else if (data_rate_mode == DATA_RATE_MODE_VHT || + data_rate_mode == DATA_RATE_MODE_HE || + data_rate_mode == DATA_RATE_MODE_EHT) { rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); } else { rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); } + eht = data_rate_mode == DATA_RATE_MODE_EHT; bw = rtw89_hw_to_rate_info_bw(desc_info->bw); - gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false); + gi_ltf = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, false, eht); ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt && status->rate_idx == rate_idx && - status->he_gi == gi_ltf && + rtw89_check_rx_statu_gi_match(status, gi_ltf, eht) && status->bw == bw; return ret; @@ -1521,8 +1707,8 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data; - u8 *pos, *end, type; - u16 aid; + u8 *pos, *end, type, tf_bw; + u16 aid, tf_rua; if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) || rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || @@ -1530,7 +1716,7 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, return; type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK); - if (type != IEEE80211_TRIGGER_TYPE_BASIC) + if (type != IEEE80211_TRIGGER_TYPE_BASIC && type != IEEE80211_TRIGGER_TYPE_MU_BAR) return; end = (u8 *)tf + skb->len; @@ -1538,17 +1724,24 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) { aid = RTW89_GET_TF_USER_INFO_AID12(pos); + tf_rua = RTW89_GET_TF_USER_INFO_RUA(pos); + tf_bw = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_ULBW_MASK); rtw89_debug(rtwdev, RTW89_DBG_TXRX, - "[TF] aid: %d, ul_mcs: %d, rua: %d\n", + "[TF] aid: %d, ul_mcs: %d, rua: %d, bw: %d\n", aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos), - RTW89_GET_TF_USER_INFO_RUA(pos)); + tf_rua, tf_bw); if (aid == RTW89_TF_PAD) break; if (aid == vif->cfg.aid) { + enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1); + rtwvif->stats.rx_tf_acc++; rtwdev->stats.rx_tf_acc++; + if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ && + rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106) + rtwvif->pwr_diff_en = true; break; } @@ -1714,6 +1907,72 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status) rx_status->rate_idx -= 4; } +static const u8 rx_status_bw_to_radiotap_eht_usig[] = { + [RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ, + [RATE_INFO_BW_5] = U8_MAX, + [RATE_INFO_BW_10] = U8_MAX, + [RATE_INFO_BW_40] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ, + [RATE_INFO_BW_80] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ, + [RATE_INFO_BW_160] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ, + [RATE_INFO_BW_HE_RU] = U8_MAX, + [RATE_INFO_BW_320] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1, + [RATE_INFO_BW_EHT_RU] = U8_MAX, +}; + +static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev, + struct sk_buff 
*skb, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_radiotap_eht_usig *usig; + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_tlv *tlv; + int eht_len = struct_size(eht, user_info, 1); + int usig_len = sizeof(*usig); + int len; + u8 bw; + + len = sizeof(*tlv) + ALIGN(eht_len, 4) + + sizeof(*tlv) + ALIGN(usig_len, 4); + + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + skb_reset_mac_header(skb); + + /* EHT */ + tlv = skb_push(skb, len); + memset(tlv, 0, len); + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT); + tlv->len = cpu_to_le16(eht_len); + + eht = (struct ieee80211_radiotap_eht *)tlv->data; + eht->known = cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); + eht->data[0] = + le32_encode_bits(rx_status->eht.gi, IEEE80211_RADIOTAP_EHT_DATA0_GI); + + eht->user_info[0] = + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O); + eht->user_info[0] |= + le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O); + + /* U-SIG */ + tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4); + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG); + tlv->len = cpu_to_le16(usig_len); + + if (rx_status->bw >= ARRAY_SIZE(rx_status_bw_to_radiotap_eht_usig)) + return; + + bw = rx_status_bw_to_radiotap_eht_usig[rx_status->bw]; + if (bw == U8_MAX) + return; + + usig = (struct ieee80211_radiotap_eht_usig *)tlv->data; + usig->common = + le32_encode_bits(1, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN) | + le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW); +} + static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct ieee80211_rx_status *rx_status) @@ -1732,6 +1991,8 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev, rx_status->flag |= RX_FLAG_RADIOTAP_HE; he = skb_push(skb, sizeof(*he)); *he = known_he; + } else if (rx_status->encoding == RX_ENC_EHT) { + rtw89_core_update_radiotap_eht(rtwdev, skb, rx_status); } } @@ -1744,7 +2005,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, struct napi_struct *napi = &rtwdev->napi; /* In low power mode, napi isn't scheduled. Receive it to netif. 
*/ - if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state))) + if (unlikely(!napi_is_scheduled(napi))) napi = NULL; rtw89_core_hw_to_sband_rate(rx_status); @@ -1875,6 +2136,71 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_core_query_rxdesc); +void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + u8 *data, u32 data_offset) +{ + struct rtw89_rxdesc_short_v2 *rxd_s; + struct rtw89_rxdesc_long_v2 *rxd_l; + u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len; + + rxd_s = (struct rtw89_rxdesc_short_v2 *)(data + data_offset); + + desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK); + desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK); + desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK); + desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK); + desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK); + desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD); + desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK); + if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT) + desc_info->mac_info_valid = true; + + desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK); + desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_MASK); + desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD); + + desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR); + desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR); + desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC); + desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC); + desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH); + + desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK); + desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK); + desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK); + desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK); + desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK); + + desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5); + + shift_len = desc_info->shift << 1; /* 2-byte unit */ + drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */ + phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */ + hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */ + desc_info->offset = data_offset + shift_len + drv_info_len + + phy_rtp_len + hdr_cnv_len; + + if (desc_info->long_rxdesc) + desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v2); + else + desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2); + desc_info->ready = true; + + if (!desc_info->long_rxdesc) + return; + + rxd_l = (struct rtw89_rxdesc_long_v2 *)(data + data_offset); + + desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN); + desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK); + desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_MASK); + desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_MASK); + + desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK); +} +EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2); + struct rtw89_core_iter_rx_status { struct rtw89_dev *rtwdev; struct ieee80211_rx_status *rx_status; @@ -1928,6 +2254,8 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, rtw89_chandef_get(rtwdev, 
RTW89_SUB_ENTITY_0); u16 data_rate; u8 data_rate_mode; + bool eht = false; + u8 gi; /* currently using single PHY */ rx_status->freq = chandef->chan->center_freq; @@ -1975,12 +2303,21 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, rx_status->encoding = RX_ENC_HE; rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1; + } else if (data_rate_mode == DATA_RATE_MODE_EHT) { + rx_status->encoding = RX_ENC_EHT; + rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate); + rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1; + eht = true; } else { rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode); } /* he_gi is used to match ppdu, so we always fill it. */ - rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true); + gi = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, true, eht); + if (eht) + rx_status->eht.gi = gi; + else + rx_status->he_gi = gi; rx_status->flag |= RX_FLAG_MACTIME_START; rx_status->mactime = desc_info->free_run_cnt; @@ -2491,6 +2828,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); + rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC); ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, true); if (ret) @@ -2533,7 +2871,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) roc->state = RTW89_ROC_IDLE; rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL); - rtw89_set_channel(rtwdev); + rtw89_chanctx_proceed(rtwdev); ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false); if (ret) rtw89_debug(rtwdev, RTW89_DBG_TXRX, @@ -2548,7 +2886,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) if (hw->conf.flags & IEEE80211_CONF_IDLE) ieee80211_queue_delayed_work(hw, &roc->roc_work, - RTW89_ROC_IDLE_TIMEOUT); + msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT)); } void rtw89_roc_work(struct work_struct *work) @@ -2662,6 +3000,27 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev) rtw89_vif_enter_lps(rtwdev, rtwvif); } +static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev) +{ + enum rtw89_entity_mode mode; + + mode = rtw89_get_entity_mode(rtwdev); + if (mode == RTW89_ENTITY_MODE_MCC) + return; + + rtw89_chip_rfk_track(rtwdev); +} + +void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) +{ + enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); + + if (mode == RTW89_ENTITY_MODE_MCC) + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE); + else + rtw89_process_p2p_ps(rtwdev, vif); +} + void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, struct rtw89_traffic_stats *stats) { @@ -2704,13 +3063,14 @@ static void rtw89_track_work(struct work_struct *work) rtw89_phy_stat_track(rtwdev); rtw89_phy_env_monitor_track(rtwdev); rtw89_phy_dig(rtwdev); - rtw89_chip_rfk_track(rtwdev); + rtw89_core_rfk_track(rtwdev); rtw89_phy_ra_update(rtwdev); rtw89_phy_cfo_track(rtwdev); rtw89_phy_tx_path_div_track(rtwdev); rtw89_phy_antdiv_track(rtwdev); rtw89_phy_ul_tb_ctrl_track(rtwdev); rtw89_tas_track(rtwdev); + rtw89_chanctx_track(rtwdev); if (rtwdev->lps_enabled && !rtwdev->btc.lps) rtw89_enter_lps_track(rtwdev); @@ -2923,6 +3283,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, rtw89_warn(rtwdev, "failed to send h2c role info\n"); return ret; } + + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE); } return 0; @@ -3088,6 +3450,8 @@ int 
rtw89_core_sta_remove(struct rtw89_dev *rtwdev, rtw89_warn(rtwdev, "failed to send h2c role info\n"); return ret; } + + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE); } return 0; @@ -3359,8 +3723,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev, idx++; } - sband->iftype_data = iftype_data; - sband->n_iftype_data = idx; + _ieee80211_set_sband_iftype_data(sband, iftype_data, idx); } static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev) @@ -3405,11 +3768,11 @@ err: hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL; if (sband_2ghz) - kfree(sband_2ghz->iftype_data); + kfree((__force void *)sband_2ghz->iftype_data); if (sband_5ghz) - kfree(sband_5ghz->iftype_data); + kfree((__force void *)sband_5ghz->iftype_data); if (sband_6ghz) - kfree(sband_6ghz->iftype_data); + kfree((__force void *)sband_6ghz->iftype_data); kfree(sband_2ghz); kfree(sband_5ghz); kfree(sband_6ghz); @@ -3421,11 +3784,11 @@ static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev) struct ieee80211_hw *hw = rtwdev->hw; if (hw->wiphy->bands[NL80211_BAND_2GHZ]) - kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data); + kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data); if (hw->wiphy->bands[NL80211_BAND_5GHZ]) - kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data); + kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data); if (hw->wiphy->bands[NL80211_BAND_6GHZ]) - kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data); + kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data); kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]); @@ -3503,6 +3866,7 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0); rtw89_debug(rtwdev, RTW89_DBG_BTC, "coex updates BT req len to %d TU\n", bt_req_len); + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BT_SLOT_CHANGE); break; default: if (event < NUM_OF_RTW89_BTC_HMSG) @@ -3767,28 +4131,34 @@ static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev) const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_rfe_parms_conf *conf = chip->rfe_parms_conf; struct rtw89_efuse *efuse = &rtwdev->efuse; + const struct rtw89_rfe_parms *sel; u8 rfe_type = efuse->rfe_type; - if (!conf) + if (!conf) { + sel = chip->dflt_parms; goto out; + } while (conf->rfe_parms) { if (rfe_type == conf->rfe_type) { - rtwdev->rfe_parms = conf->rfe_parms; - return; + sel = conf->rfe_parms; + goto out; } conf++; } + sel = chip->dflt_parms; + out: - rtwdev->rfe_parms = chip->dflt_parms; + rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, sel); + rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl); } static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) { int ret; - ret = rtw89_mac_partial_init(rtwdev); + ret = rtw89_mac_partial_init(rtwdev, false); if (ret) return ret; @@ -3805,7 +4175,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) return ret; rtw89_core_setup_phycap(rtwdev); - rtw89_core_setup_rfe_parms(rtwdev); rtw89_mac_pwr_off(rtwdev); @@ -3837,20 +4206,21 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev) return ret; } + ret = rtw89_chip_efuse_info_setup(rtwdev); + if (ret) + return ret; + ret = rtw89_fw_recognize_elements(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to recognize firmware elements\n"); return ret; } - ret = 
rtw89_chip_efuse_info_setup(rtwdev); - if (ret) - return ret; - ret = rtw89_chip_board_info_setup(rtwdev); if (ret) return ret; + rtw89_core_setup_rfe_parms(rtwdev); rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev); return 0; @@ -3892,6 +4262,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + + /* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */ + ieee80211_hw_set(hw, TIMING_BEACON_ONLY); + if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) ieee80211_hw_set(hw, CONNECTION_MONITOR); @@ -4033,7 +4407,11 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, goto err; hw->wiphy->iface_combinations = rtw89_iface_combs; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs); + + if (no_chanctx || chip->support_chanctx_num == 1) + hw->wiphy->n_iface_combinations = 1; + else + hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs); rtwdev = hw->priv; rtwdev->hw = hw; @@ -4058,6 +4436,7 @@ EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw); void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev) { kfree(rtwdev->ops); + kfree(rtwdev->rfe_data); release_firmware(rtwdev->fw.req.firmware); ieee80211_free_hw(rtwdev->hw); } diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 04ce221730..8f59461e58 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -37,7 +37,14 @@ extern const struct ieee80211_ops rtw89_ops; #define RSSI_FACTOR 1 #define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI) #define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR) -#define RTW89_RADIOTAP_ROOM ALIGN(sizeof(struct ieee80211_radiotap_he), 64) +#define RTW89_RADIOTAP_ROOM_HE sizeof(struct ieee80211_radiotap_he) +#define RTW89_RADIOTAP_ROOM_EHT \ + (sizeof(struct ieee80211_radiotap_tlv) + \ + ALIGN(struct_size((struct ieee80211_radiotap_eht *)0, user_info, 1), 4) + \ + sizeof(struct ieee80211_radiotap_tlv) + \ + ALIGN(sizeof(struct ieee80211_radiotap_eht_usig), 4)) +#define RTW89_RADIOTAP_ROOM \ + ALIGN(max(RTW89_RADIOTAP_ROOM_HE, RTW89_RADIOTAP_ROOM_EHT), 64) #define RTW89_HTC_MASK_VARIANT GENMASK(1, 0) #define RTW89_HTC_VARIANT_HE 3 @@ -640,12 +647,29 @@ enum rtw89_rate_section { RTW89_RS_TX_SHAPE_NUM = RTW89_RS_OFDM + 1, }; +enum rtw89_rate_offset_indexes { + RTW89_RATE_OFFSET_HE, + RTW89_RATE_OFFSET_VHT, + RTW89_RATE_OFFSET_HT, + RTW89_RATE_OFFSET_OFDM, + RTW89_RATE_OFFSET_CCK, + RTW89_RATE_OFFSET_DLRU_EHT, + RTW89_RATE_OFFSET_DLRU_HE, + RTW89_RATE_OFFSET_EHT, + __RTW89_RATE_OFFSET_NUM, + + RTW89_RATE_OFFSET_NUM_AX = RTW89_RATE_OFFSET_CCK + 1, + RTW89_RATE_OFFSET_NUM_BE = RTW89_RATE_OFFSET_EHT + 1, +}; + enum rtw89_rate_num { RTW89_RATE_CCK_NUM = 4, RTW89_RATE_OFDM_NUM = 8, - RTW89_RATE_MCS_NUM = 12, RTW89_RATE_HEDCM_NUM = 4, /* for HEDCM MCS0/1/3/4 */ - RTW89_RATE_OFFSET_NUM = 5, /* for HE(HEDCM)/VHT/HT/OFDM/CCK offset */ + + RTW89_RATE_MCS_NUM_AX = 12, + RTW89_RATE_MCS_NUM_BE = 16, + __RTW89_RATE_MCS_NUM = 16, }; enum rtw89_nss { @@ -670,6 +694,12 @@ enum rtw89_beamforming_type { RTW89_BF_NUM, }; +enum rtw89_ofdma_type { + RTW89_NON_OFDMA = 0, + RTW89_OFDMA = 1, + RTW89_OFDMA_NUM, +}; + enum rtw89_regulation_type { RTW89_WW = 0, RTW89_ETSI = 1, @@ -686,6 +716,7 @@ enum rtw89_regulation_type { RTW89_CN = 12, RTW89_QATAR = 13, RTW89_UK = 14, + RTW89_THAILAND = 15, RTW89_REGD_NUM, }; @@ -715,44 +746,16 @@ enum 
rtw89_fw_pkt_ofld_type { struct rtw89_txpwr_byrate { s8 cck[RTW89_RATE_CCK_NUM]; s8 ofdm[RTW89_RATE_OFDM_NUM]; - s8 mcs[RTW89_NSS_NUM][RTW89_RATE_MCS_NUM]; - s8 hedcm[RTW89_NSS_HEDCM_NUM][RTW89_RATE_HEDCM_NUM]; - s8 offset[RTW89_RATE_OFFSET_NUM]; -}; - -enum rtw89_bandwidth_section_num { - RTW89_BW20_SEC_NUM = 8, - RTW89_BW40_SEC_NUM = 4, - RTW89_BW80_SEC_NUM = 2, -}; - -#define RTW89_TXPWR_LMT_PAGE_SIZE 40 - -struct rtw89_txpwr_limit { - s8 cck_20m[RTW89_BF_NUM]; - s8 cck_40m[RTW89_BF_NUM]; - s8 ofdm[RTW89_BF_NUM]; - s8 mcs_20m[RTW89_BW20_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_40m[RTW89_BW40_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_80m[RTW89_BW80_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_160m[RTW89_BF_NUM]; - s8 mcs_40m_0p5[RTW89_BF_NUM]; - s8 mcs_40m_2p5[RTW89_BF_NUM]; -}; - -#define RTW89_RU_SEC_NUM 8 - -#define RTW89_TXPWR_LMT_RU_PAGE_SIZE 24 - -struct rtw89_txpwr_limit_ru { - s8 ru26[RTW89_RU_SEC_NUM]; - s8 ru52[RTW89_RU_SEC_NUM]; - s8 ru106[RTW89_RU_SEC_NUM]; + s8 mcs[RTW89_OFDMA_NUM][RTW89_NSS_NUM][__RTW89_RATE_MCS_NUM]; + s8 hedcm[RTW89_OFDMA_NUM][RTW89_NSS_HEDCM_NUM][RTW89_RATE_HEDCM_NUM]; + s8 offset[__RTW89_RATE_OFFSET_NUM]; + s8 trap; }; struct rtw89_rate_desc { enum rtw89_nss nss; enum rtw89_rate_section rs; + enum rtw89_ofdma_type ofdma; u8 idx; }; @@ -841,9 +844,14 @@ enum rtw89_bandwidth { RTW89_CHANNEL_WIDTH_40 = 1, RTW89_CHANNEL_WIDTH_80 = 2, RTW89_CHANNEL_WIDTH_160 = 3, - RTW89_CHANNEL_WIDTH_80_80 = 4, - RTW89_CHANNEL_WIDTH_5 = 5, - RTW89_CHANNEL_WIDTH_10 = 6, + RTW89_CHANNEL_WIDTH_320 = 4, + + /* keep index order above */ + RTW89_CHANNEL_WIDTH_ORDINARY_NUM = 5, + + RTW89_CHANNEL_WIDTH_80_80 = 5, + RTW89_CHANNEL_WIDTH_5 = 6, + RTW89_CHANNEL_WIDTH_10 = 7, }; enum rtw89_ps_mode { @@ -855,13 +863,16 @@ enum rtw89_ps_mode { #define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1) #define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) -#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) +#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) +#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) #define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) enum rtw89_ru_bandwidth { RTW89_RU26 = 0, RTW89_RU52 = 1, RTW89_RU106 = 2, + RTW89_RU52_26 = 3, + RTW89_RU106_26 = 4, RTW89_RU_NUM, }; @@ -898,6 +909,7 @@ struct rtw89_chan { u32 freq; enum rtw89_subband subband_type; enum rtw89_sc_offset pri_ch_idx; + u8 pri_sb_idx; }; struct rtw89_chan_rcd { @@ -926,6 +938,12 @@ struct rtw89_port_reg { u32 bcn_cnt_tmr; u32 tsftr_l; u32 tsftr_h; + u32 md_tsft; + u32 bss_color; + u32 mbssid; + u32 mbssid_drop; + u32 tsf_sync; + u32 hiq_win[RTW89_PORT_NUM]; }; struct rtw89_txwd_body { @@ -948,6 +966,17 @@ struct rtw89_txwd_body_v1 { __le32 dword7; } __packed; +struct rtw89_txwd_body_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + struct rtw89_txwd_info { __le32 dword0; __le32 dword1; @@ -957,10 +986,23 @@ struct rtw89_txwd_info { __le32 dword5; } __packed; +struct rtw89_txwd_info_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + struct rtw89_rx_desc_info { u16 pkt_size; u8 pkt_type; u8 drv_info_size; + u8 phy_rpt_size; + u8 hdr_cnv_size; u8 shift; u8 wl_hd_iv_len; bool long_rxdesc; @@ -999,6 +1041,15 @@ struct rtw89_rxdesc_short { __le32 dword3; } __packed; +struct rtw89_rxdesc_short_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; +} __packed; + struct 
rtw89_rxdesc_long { __le32 dword0; __le32 dword1; @@ -1010,6 +1061,19 @@ struct rtw89_rxdesc_long { __le32 dword7; } __packed; +struct rtw89_rxdesc_long_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; + __le32 dword8; + __le32 dword9; +} __packed; + struct rtw89_tx_desc_info { u16 pkt_size; u8 wp_offset; @@ -2230,12 +2294,6 @@ struct rtw89_btc_fbtc_fddt_cell_status { u8 state_phase; /* [0:3] train state, [4:7] train phase */ } __packed; -struct rtw89_btc_fbtc_fddt_cell_status_v5 { - s8 wl_tx_pwr; - s8 bt_tx_pwr; - s8 bt_rx_gain; -} __packed; - struct rtw89_btc_fbtc_cysta_v3 { /* statistics for cycles */ u8 fver; u8 rsvd; @@ -2299,9 +2357,9 @@ struct rtw89_btc_fbtc_cysta_v5 { /* statistics for cycles */ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept; struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX]; struct rtw89_btc_fbtc_cycle_fddt_info_v5 fddt_trx[BTC_CYCLE_SLOT_MAX]; - struct rtw89_btc_fbtc_fddt_cell_status_v5 fddt_cells[FDD_TRAIN_WL_DIRECTION] - [FDD_TRAIN_WL_RSSI_LEVEL] - [FDD_TRAIN_BT_RSSI_LEVEL]; + struct rtw89_btc_fbtc_fddt_cell_status fddt_cells[FDD_TRAIN_WL_DIRECTION] + [FDD_TRAIN_WL_RSSI_LEVEL] + [FDD_TRAIN_BT_RSSI_LEVEL]; __le32 except_map; } __packed; @@ -2677,6 +2735,7 @@ enum rtw89_ra_mode { RTW89_RA_MODE_HT = BIT(2), RTW89_RA_MODE_VHT = BIT(3), RTW89_RA_MODE_HE = BIT(4), + RTW89_RA_MODE_EHT = BIT(5), }; enum rtw89_ra_report_mode { @@ -2684,6 +2743,7 @@ enum rtw89_ra_report_mode { RTW89_RA_RPT_MODE_HT, RTW89_RA_RPT_MODE_VHT, RTW89_RA_RPT_MODE_HE, + RTW89_RA_RPT_MODE_EHT, }; enum rtw89_dig_noisy_level { @@ -2930,6 +2990,7 @@ struct rtw89_vif { struct list_head list; struct rtw89_dev *rtwdev; struct rtw89_roc roc; + bool chanctx_assigned; /* only valid when running with chanctx_ops */ enum rtw89_sub_entity_idx sub_entity_idx; enum rtw89_reg_6ghz_power reg_6ghz_power; @@ -2957,6 +3018,8 @@ struct rtw89_vif { bool is_hesta; bool last_a_ctrl; bool dyn_tb_bedge_en; + bool pre_pwr_diff_en; + bool pwr_diff_en; u8 def_tri_idx; u32 tdls_peer; struct work_struct update_beacon_work; @@ -3032,6 +3095,7 @@ struct rtw89_hci_info { struct rtw89_chip_ops { int (*enable_bb_rf)(struct rtw89_dev *rtwdev); int (*disable_bb_rf)(struct rtw89_dev *rtwdev); + void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void (*bb_reset)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void (*bb_sethw)(struct rtw89_dev *rtwdev); @@ -3066,11 +3130,13 @@ struct rtw89_chip_ops { enum rtw89_phy_idx phy_idx); int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path); - void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg); + void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); void (*query_ppdu)(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status); - void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en); + void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); void (*cfg_txrx_path)(struct rtw89_dev *rtwdev); void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx); @@ -3294,10 +3360,17 @@ struct rtw89_txpwr_rule_6ghz { [RTW89_6G_CH_NUM]; }; +struct rtw89_tx_shape { + const u8 (*lmt)[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM][RTW89_REGD_NUM]; + const u8 (*lmt_ru)[RTW89_BAND_NUM][RTW89_REGD_NUM]; +}; + struct rtw89_rfe_parms { 
+ const struct rtw89_txpwr_table *byr_tbl; struct rtw89_txpwr_rule_2ghz rule_2ghz; struct rtw89_txpwr_rule_5ghz rule_5ghz; struct rtw89_txpwr_rule_6ghz rule_6ghz; + struct rtw89_tx_shape tx_shape; }; struct rtw89_rfe_parms_conf { @@ -3305,6 +3378,95 @@ struct rtw89_rfe_parms_conf { u8 rfe_type; }; +#define RTW89_TXPWR_CONF_DFLT_RFE_TYPE 0x0 + +struct rtw89_txpwr_conf { + u8 rfe_type; + u8 ent_sz; + u32 num_ents; + const void *data; +}; + +#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data) + +#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \ + for (typecheck(const void *, cursor), (cursor) = (conf)->data, \ + memcpy(&(entry), cursor, \ + min_t(u8, sizeof(entry), (conf)->ent_sz)); \ + (cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \ + (cursor) += (conf)->ent_sz, \ + memcpy(&(entry), cursor, \ + min_t(u8, sizeof(entry), (conf)->ent_sz))) + +struct rtw89_txpwr_byrate_data { + struct rtw89_txpwr_conf conf; + struct rtw89_txpwr_table tbl; +}; + +struct rtw89_txpwr_lmt_2ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_5ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_6ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_6G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][NUM_OF_RTW89_REG_6GHZ_POWER] + [RTW89_6G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_2ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_5ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_6ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][NUM_OF_RTW89_REG_6GHZ_POWER] + [RTW89_6G_CH_NUM]; +}; + +struct rtw89_tx_shape_lmt_data { + struct rtw89_txpwr_conf conf; + u8 v[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM][RTW89_REGD_NUM]; +}; + +struct rtw89_tx_shape_lmt_ru_data { + struct rtw89_txpwr_conf conf; + u8 v[RTW89_BAND_NUM][RTW89_REGD_NUM]; +}; + +struct rtw89_rfe_data { + struct rtw89_txpwr_byrate_data byrate; + struct rtw89_txpwr_lmt_2ghz_data lmt_2ghz; + struct rtw89_txpwr_lmt_5ghz_data lmt_5ghz; + struct rtw89_txpwr_lmt_6ghz_data lmt_6ghz; + struct rtw89_txpwr_lmt_ru_2ghz_data lmt_ru_2ghz; + struct rtw89_txpwr_lmt_ru_5ghz_data lmt_ru_5ghz; + struct rtw89_txpwr_lmt_ru_6ghz_data lmt_ru_6ghz; + struct rtw89_tx_shape_lmt_data tx_shape_lmt; + struct rtw89_tx_shape_lmt_ru_data tx_shape_lmt_ru; + struct rtw89_rfe_parms rfe_parms; +}; + struct rtw89_page_regs { u32 hci_fc_ctrl; u32 ch_page_ctrl; @@ -3428,6 +3590,7 @@ enum rtw89_chanctx_state { enum rtw89_chanctx_callbacks { RTW89_CHANCTX_CALLBACK_PLACEHOLDER, + RTW89_CHANCTX_CALLBACK_RFK, NUM_OF_RTW89_CHANCTX_CALLBACKS, }; @@ -3446,6 +3609,7 @@ struct rtw89_chip_info { const char *fw_basename; u8 fw_format_max; bool try_ce_fw; + u8 bbmcu_nr; u32 needed_fw_elms; u32 fifo_size; bool small_fifo_size; @@ -3462,7 +3626,8 @@ struct rtw89_chip_info { u8 support_bands; bool support_bw160; bool support_unii4; - bool support_ul_tb_ctrl; + bool ul_tb_waveform_ctrl; + bool ul_tb_pwr_diff; bool hw_sec_hdr; u8 rf_path_num; u8 tx_nss; @@ -3490,7 +3655,6 @@ struct rtw89_chip_info { const struct rtw89_phy_table *rf_table[RF_PATH_MAX]; const struct 
rtw89_phy_table *nctl_table; const struct rtw89_rfk_tbl *nctl_post_table; - const struct rtw89_txpwr_table *byr_table; const struct rtw89_phy_dig_gain_table *dig_table; const struct rtw89_dig_regs *dig_regs; const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table; @@ -3527,6 +3691,7 @@ struct rtw89_chip_info { u32 hci_func_en_addr; u32 h2c_desc_size; u32 txwd_body_size; + u32 txwd_info_size; u32 h2c_ctrl_reg; const u32 *h2c_regs; struct rtw89_reg_def h2c_counter_reg; @@ -3540,6 +3705,7 @@ struct rtw89_chip_info { u8 dcfo_comp_sft; const struct rtw89_imr_info *imr_info; const struct rtw89_rrsr_cfgs *rrsr_cfgs; + struct rtw89_reg_def bss_clr_vld; u32 bss_clr_map_reg; u32 dma_ch_mask; u32 edcca_lvl_reg; @@ -3610,6 +3776,14 @@ struct rtw89_mac_info { struct rtw89_wait_info fw_ofld_wait; }; +enum rtw89_fwdl_check_type { + RTW89_FWDL_CHECK_FREERTOS_DONE, + RTW89_FWDL_CHECK_WCPU_FWDL_DONE, + RTW89_FWDL_CHECK_DCPU_FWDL_DONE, + RTW89_FWDL_CHECK_BB0_FWDL_DONE, + RTW89_FWDL_CHECK_BB1_FWDL_DONE, +}; + enum rtw89_fw_type { RTW89_FW_NORMAL = 1, RTW89_FW_WOWLAN = 3, @@ -3776,6 +3950,17 @@ struct rtw89_chanctx_cfg { enum rtw89_sub_entity_idx idx; }; +enum rtw89_chanctx_changes { + RTW89_CHANCTX_REMOTE_STA_CHANGE, + RTW89_CHANCTX_BCN_OFFSET_CHANGE, + RTW89_CHANCTX_P2P_PS_CHANGE, + RTW89_CHANCTX_BT_SLOT_CHANGE, + RTW89_CHANCTX_TSF32_TOGGLE_CHANGE, + + NUM_OF_RTW89_CHANCTX_CHANGES, + RTW89_CHANCTX_CHANGE_DFLT = NUM_OF_RTW89_CHANCTX_CHANGES, +}; + enum rtw89_entity_mode { RTW89_ENTITY_MODE_SCC, RTW89_ENTITY_MODE_MCC_PREPARE, @@ -3807,11 +3992,13 @@ struct rtw89_hal { bool support_igi; atomic_t roc_entity_idx; + DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES); DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY); struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY]; struct cfg80211_chan_def roc_chandef; bool entity_active; + bool entity_pause; enum rtw89_entity_mode entity_mode; u32 edcca_bak; @@ -4357,8 +4544,95 @@ struct rtw89_wow_param { u8 pattern_cnt; }; +struct rtw89_mcc_limit { + bool enable; + u16 max_tob; /* TU; max time offset behind */ + u16 max_toa; /* TU; max time offset ahead */ + u16 max_dur; /* TU */ +}; + +struct rtw89_mcc_policy { + u8 c2h_rpt; + u8 tx_null_early; + u8 dis_tx_null; + u8 in_curr_ch; + u8 dis_sw_retry; + u8 sw_retry_count; +}; + +struct rtw89_mcc_role { + struct rtw89_vif *rtwvif; + struct rtw89_mcc_policy policy; + struct rtw89_mcc_limit limit; + + /* byte-array in LE order for FW */ + u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)]; + + u16 duration; /* TU */ + u16 beacon_interval; /* TU */ + bool is_2ghz; + bool is_go; + bool is_gc; +}; + +struct rtw89_mcc_bt_role { + u16 duration; /* TU */ +}; + +struct rtw89_mcc_courtesy { + bool enable; + u8 slot_num; + u8 macid_src; + u8 macid_tgt; +}; + +enum rtw89_mcc_plan { + RTW89_MCC_PLAN_TAIL_BT, + RTW89_MCC_PLAN_MID_BT, + RTW89_MCC_PLAN_NO_BT, + + NUM_OF_RTW89_MCC_PLAN, +}; + +struct rtw89_mcc_pattern { + s16 tob_ref; /* TU; time offset behind of reference role */ + s16 toa_ref; /* TU; time offset ahead of reference role */ + s16 tob_aux; /* TU; time offset behind of auxiliary role */ + s16 toa_aux; /* TU; time offset ahead of auxiliary role */ + + enum rtw89_mcc_plan plan; + struct rtw89_mcc_courtesy courtesy; +}; + +struct rtw89_mcc_sync { + bool enable; + u16 offset; /* TU */ + u8 macid_src; + u8 macid_tgt; +}; + +struct rtw89_mcc_config { + struct rtw89_mcc_pattern pattern; + struct rtw89_mcc_sync sync; + u64 start_tsf; + u16 mcc_interval; /* TU */ + u16 beacon_offset; /* TU */ +}; + +enum rtw89_mcc_mode { + 
RTW89_MCC_MODE_GO_STA, + RTW89_MCC_MODE_GC_STA, +}; + struct rtw89_mcc_info { struct rtw89_wait_info wait; + + u8 group; + enum rtw89_mcc_mode mode; + struct rtw89_mcc_role role_ref; /* reference role */ + struct rtw89_mcc_role role_aux; /* auxiliary role */ + struct rtw89_mcc_bt_role bt_role; + struct rtw89_mcc_config config; }; struct rtw89_dev { @@ -4378,6 +4652,7 @@ struct rtw89_dev { struct rtw89_hci_info hci; struct rtw89_efuse efuse; struct rtw89_traffic_stats stats; + struct rtw89_rfe_data *rfe_data; /* ensures exclusive access from mac80211 callbacks */ struct mutex mutex; @@ -4425,7 +4700,7 @@ struct rtw89_dev { bool is_bt_iqk_timeout; struct rtw89_fem_info fem; - struct rtw89_txpwr_byrate byr[RTW89_BAND_NUM]; + struct rtw89_txpwr_byrate byr[RTW89_BAND_NUM][RTW89_BYR_BW_NUM]; struct rtw89_tssi_info tssi; struct rtw89_power_trim_info pwr_trim; @@ -4910,6 +5185,30 @@ enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width) } } +static inline +enum nl80211_he_ru_alloc rtw89_he_rua_to_ru_alloc(u16 rua) +{ + switch (rua) { + default: + WARN(1, "Invalid RU allocation: %d\n", rua); + fallthrough; + case 0 ... 36: + return NL80211_RATE_INFO_HE_RU_ALLOC_26; + case 37 ... 52: + return NL80211_RATE_INFO_HE_RU_ALLOC_52; + case 53 ... 60: + return NL80211_RATE_INFO_HE_RU_ALLOC_106; + case 61 ... 64: + return NL80211_RATE_INFO_HE_RU_ALLOC_242; + case 65 ... 66: + return NL80211_RATE_INFO_HE_RU_ALLOC_484; + case 67: + return NL80211_RATE_INFO_HE_RU_ALLOC_996; + case 68: + return NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + } +} + static inline struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta) @@ -5017,6 +5316,15 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev) chip->ops->rfe_gpio(rtwdev); } +static inline +void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->bb_preinit) + chip->ops->bb_preinit(rtwdev, phy_idx); +} + static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -5112,13 +5420,13 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev, chip->ops->query_ppdu(rtwdev, phy_ppdu, status); } -static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, - bool bt_en) +static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->ops->bb_ctrl_btc_preagc) - chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en); + if (chip->ops->ctrl_nbtg_bt_tx) + chip->ops->ctrl_nbtg_bt_tx(rtwdev, en, phy_idx); } static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev) @@ -5156,12 +5464,13 @@ static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band) return regd->txpwr_regd[band]; } -static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static inline void rtw89_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->ops->ctrl_btg) - chip->ops->ctrl_btg(rtwdev, btg); + if (chip->ops->ctrl_btg_bt_rx) + chip->ops->ctrl_btg_bt_rx(rtwdev, en, phy_idx); } static inline @@ -5333,15 +5642,24 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev, + struct 
rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_rx(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, struct sk_buff *skb); void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, u8 *data, u32 data_offset); +void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + u8 *data, u32 data_offset); void rtw89_core_napi_start(struct rtw89_dev *rtwdev); void rtw89_core_napi_stop(struct rtw89_dev *rtwdev); void rtw89_core_napi_init(struct rtw89_dev *rtwdev); @@ -5410,6 +5728,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool active); +void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event); #endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index d162e64f60..a3f795d240 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -367,7 +367,11 @@ static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v) } struct txpwr_ent { - const char *txt; + bool nested; + union { + const char *txt; + const struct txpwr_ent *ptr; + }; u8 len; }; @@ -379,6 +383,12 @@ struct txpwr_map { u32 addr_to_1ss; }; +#define __GEN_TXPWR_ENT_NESTED(_e) \ + { .nested = true, .ptr = __txpwr_ent_##_e, \ + .len = ARRAY_SIZE(__txpwr_ent_##_e) } + +#define __GEN_TXPWR_ENT0(_t) { .len = 0, .txt = _t } + #define __GEN_TXPWR_ENT2(_t, _e0, _e1) \ { .len = 2, .txt = _t "\t- " _e0 " " _e1 } @@ -390,7 +400,7 @@ struct txpwr_map { _e0 " " _e1 " " _e2 " " _e3 " " \ _e4 " " _e5 " " _e6 " " _e7 } -static const struct txpwr_ent __txpwr_ent_byr[] = { +static const struct txpwr_ent __txpwr_ent_byr_ax[] = { __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), @@ -406,18 +416,18 @@ static const struct txpwr_ent __txpwr_ent_byr[] = { __GEN_TXPWR_ENT4("HEDCM_2NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), }; -static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) == +static_assert((ARRAY_SIZE(__txpwr_ent_byr_ax) * 4) == (R_AX_PWR_BY_RATE_MAX - R_AX_PWR_BY_RATE + 4)); -static const struct txpwr_map __txpwr_map_byr = { - .ent = __txpwr_ent_byr, - .size = ARRAY_SIZE(__txpwr_ent_byr), +static const struct txpwr_map __txpwr_map_byr_ax = { + .ent = __txpwr_ent_byr_ax, + .size = ARRAY_SIZE(__txpwr_ent_byr_ax), .addr_from = R_AX_PWR_BY_RATE, .addr_to = R_AX_PWR_BY_RATE_MAX, .addr_to_1ss = R_AX_PWR_BY_RATE_1SS_MAX, }; -static const struct txpwr_ent __txpwr_ent_lmt[] = { +static const struct txpwr_ent __txpwr_ent_lmt_ax[] = { /* 1TX */ __GEN_TXPWR_ENT2("CCK_1TX_20M ", "NON_BF", "BF"), __GEN_TXPWR_ENT2("CCK_1TX_40M ", "NON_BF", "BF"), @@ -462,18 +472,18 @@ static const struct txpwr_ent __txpwr_ent_lmt[] = { __GEN_TXPWR_ENT2("MCS_2TX_40M_2p5", "NON_BF", "BF"), }; -static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) == +static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ax) * 2) == (R_AX_PWR_LMT_MAX - R_AX_PWR_LMT + 4)); -static const struct txpwr_map 
__txpwr_map_lmt = { - .ent = __txpwr_ent_lmt, - .size = ARRAY_SIZE(__txpwr_ent_lmt), +static const struct txpwr_map __txpwr_map_lmt_ax = { + .ent = __txpwr_ent_lmt_ax, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ax), .addr_from = R_AX_PWR_LMT, .addr_to = R_AX_PWR_LMT_MAX, .addr_to_1ss = R_AX_PWR_LMT_1SS_MAX, }; -static const struct txpwr_ent __txpwr_ent_lmt_ru[] = { +static const struct txpwr_ent __txpwr_ent_lmt_ru_ax[] = { /* 1TX */ __GEN_TXPWR_ENT8("1TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3", "RU26__4", "RU26__5", "RU26__6", "RU26__7"), @@ -490,25 +500,207 @@ static const struct txpwr_ent __txpwr_ent_lmt_ru[] = { "RU106_4", "RU106_5", "RU106_6", "RU106_7"), }; -static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) == +static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru_ax) * 8) == (R_AX_PWR_RU_LMT_MAX - R_AX_PWR_RU_LMT + 4)); -static const struct txpwr_map __txpwr_map_lmt_ru = { - .ent = __txpwr_ent_lmt_ru, - .size = ARRAY_SIZE(__txpwr_ent_lmt_ru), +static const struct txpwr_map __txpwr_map_lmt_ru_ax = { + .ent = __txpwr_ent_lmt_ru_ax, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ru_ax), .addr_from = R_AX_PWR_RU_LMT, .addr_to = R_AX_PWR_RU_LMT_MAX, .addr_to_1ss = R_AX_PWR_RU_LMT_1SS_MAX, }; -static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, - const s8 *buf, const u8 cur) +static const struct txpwr_ent __txpwr_ent_byr_mcs_be[] = { + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("MCS_1SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("HEDCM_1SS ", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("DLRU_MCS_1SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("DLRU_HEDCM_1SS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("MCS_2SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("HEDCM_2SS ", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("DLRU_MCS_2SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("DLRU_HEDCM_2SS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), +}; + +static const struct txpwr_ent __txpwr_ent_byr_be[] = { + __GEN_TXPWR_ENT0("BW20"), + __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW40"), + __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + /* there is no CCK section after BW80 */ + 
__GEN_TXPWR_ENT0("BW80"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW160"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW320"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), +}; + +static const struct txpwr_map __txpwr_map_byr_be = { + .ent = __txpwr_ent_byr_be, + .size = ARRAY_SIZE(__txpwr_ent_byr_be), + .addr_from = R_BE_PWR_BY_RATE, + .addr_to = R_BE_PWR_BY_RATE_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static const struct txpwr_ent __txpwr_ent_lmt_mcs_be[] = { + __GEN_TXPWR_ENT2("MCS_20M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_8 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_9 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_10 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_11 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_12 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_13 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_14 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_15 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_160M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_160M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_320M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_0p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_2p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_4p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_6p5", "NON_BF", "BF"), +}; + +static const struct txpwr_ent __txpwr_ent_lmt_be[] = { + __GEN_TXPWR_ENT0("1TX"), + __GEN_TXPWR_ENT2("CCK_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_40M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("OFDM ", "NON_BF", "BF"), + __GEN_TXPWR_ENT_NESTED(lmt_mcs_be), + + __GEN_TXPWR_ENT0("2TX"), + __GEN_TXPWR_ENT2("CCK_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_40M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("OFDM ", "NON_BF", "BF"), + __GEN_TXPWR_ENT_NESTED(lmt_mcs_be), +}; + +static const struct txpwr_map __txpwr_map_lmt_be = { + .ent = __txpwr_ent_lmt_be, + .size = 
ARRAY_SIZE(__txpwr_ent_lmt_be), + .addr_from = R_BE_PWR_LMT, + .addr_to = R_BE_PWR_LMT_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static const struct txpwr_ent __txpwr_ent_lmt_ru_indexes_be[] = { + __GEN_TXPWR_ENT8("RU26 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU26 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU52 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU52 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU106 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU106 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU52_26 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU52_26 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU106_26", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU106_26", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), +}; + +static const struct txpwr_ent __txpwr_ent_lmt_ru_be[] = { + __GEN_TXPWR_ENT0("1TX"), + __GEN_TXPWR_ENT_NESTED(lmt_ru_indexes_be), + + __GEN_TXPWR_ENT0("2TX"), + __GEN_TXPWR_ENT_NESTED(lmt_ru_indexes_be), +}; + +static const struct txpwr_map __txpwr_map_lmt_ru_be = { + .ent = __txpwr_ent_lmt_ru_be, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ru_be), + .addr_from = R_BE_PWR_RU_LMT, + .addr_to = R_BE_PWR_RU_LMT_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static unsigned int +__print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, + const s8 *buf, const unsigned int cur) { + unsigned int cnt, i; char *fmt; + if (ent->nested) { + for (cnt = 0, i = 0; i < ent->len; i++) + cnt += __print_txpwr_ent(m, ent->ptr + i, buf, + cur + cnt); + return cnt; + } + switch (ent->len) { + case 0: + seq_printf(m, "\t<< %s >>\n", ent->txt); + return 0; case 2: - fmt = "%s\t| %3d, %3d,\tdBm\n"; + fmt = "%s\t| %3d, %3d,\t\tdBm\n"; seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]); return 2; case 4: @@ -532,10 +724,10 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, { u8 fct = rtwdev->chip->txpwr_factor_mac; u8 path_num = rtwdev->chip->rf_path_num; + unsigned int cur, i; u32 max_valid_addr; u32 val, addr; s8 *buf, tmp; - u8 cur, i; int ret; buf = vzalloc(map->addr_to - map->addr_from + 4); @@ -547,6 +739,9 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, else max_valid_addr = map->addr_to; + if (max_valid_addr == 0) + return -EOPNOTSUPP; + for (addr = map->addr_from; addr <= max_valid_addr; addr += 4) { ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val); if (ret) @@ -600,10 +795,35 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev, #undef case_REGD +struct dbgfs_txpwr_table { + const struct txpwr_map *byr; + const struct txpwr_map *lmt; + const struct txpwr_map *lmt_ru; +}; + +static const struct dbgfs_txpwr_table dbgfs_txpwr_table_ax = { + .byr = &__txpwr_map_byr_ax, + .lmt = &__txpwr_map_lmt_ax, + .lmt_ru = &__txpwr_map_lmt_ru_ax, +}; + +static const struct dbgfs_txpwr_table dbgfs_txpwr_table_be = { + .byr = &__txpwr_map_byr_be, + .lmt = &__txpwr_map_lmt_be, + .lmt_ru = &__txpwr_map_lmt_ru_be, +}; + 
+static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] = { + [RTW89_CHIP_AX] = &dbgfs_txpwr_table_ax, + [RTW89_CHIP_BE] = &dbgfs_txpwr_table_be, +}; + static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; + const struct dbgfs_txpwr_table *tbl; const struct rtw89_chan *chan; int ret = 0; @@ -620,18 +840,24 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) seq_puts(m, "[TAS]\n"); rtw89_print_tas(m, rtwdev); + tbl = dbgfs_txpwr_tables[chip_gen]; + if (!tbl) { + ret = -EOPNOTSUPP; + goto err; + } + seq_puts(m, "\n[TX power byrate]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr); + ret = __print_txpwr_map(m, rtwdev, tbl->byr); if (ret) goto err; seq_puts(m, "\n[TX power limit]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt); + ret = __print_txpwr_map(m, rtwdev, tbl->lmt); if (ret) goto err; seq_puts(m, "\n[TX power limit_ru]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt_ru); + ret = __print_txpwr_map(m, rtwdev, tbl->lmt_ru); if (ret) goto err; @@ -3241,6 +3467,11 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) [NL80211_RATE_INFO_HE_GI_1_6] = "1.6", [NL80211_RATE_INFO_HE_GI_3_2] = "3.2", }; + static const char * const eht_gi_str[] = { + [NL80211_RATE_INFO_EHT_GI_0_8] = "0.8", + [NL80211_RATE_INFO_EHT_GI_1_6] = "1.6", + [NL80211_RATE_INFO_EHT_GI_3_2] = "3.2", + }; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rate_info *rate = &rtwsta->ra_report.txrate; struct ieee80211_rx_status *status = &rtwsta->rx_status; @@ -3266,6 +3497,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs, rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); + else if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) + seq_printf(m, "EHT %dSS MCS-%d GI:%s", rate->nss, rate->mcs, + rate->eht_gi < ARRAY_SIZE(eht_gi_str) ? + eht_gi_str[rate->eht_gi] : "N/A"); else seq_printf(m, "Legacy %d", rate->legacy); seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : ""); @@ -3294,6 +3529,11 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); break; + case RX_ENC_EHT: + seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx, + status->eht.gi < ARRAY_SIZE(eht_gi_str) ? 
+ eht_gi_str[status->eht.gi] : "N/A"); + break; } seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw)); seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index df1dc2f43c..313ed4c454 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -13,6 +13,20 @@ #include "reg.h" #include "util.h" +union rtw89_fw_element_arg { + size_t offset; + enum rtw89_rf_path rf_path; + enum rtw89_fw_type fw_type; +}; + +struct rtw89_fw_element_handler { + int (*fn)(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg); + const union rtw89_fw_element_arg arg; + const char *name; +}; + static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb); static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, @@ -47,22 +61,15 @@ struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); } -static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) -{ - u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); - - return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); -} - -#define FWDL_WAIT_CNT 400000 -int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 val; int ret; - ret = read_poll_timeout_atomic(_fw_get_rdy, val, + ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, val == RTW89_FWDL_WCPU_FW_INIT_RDY, - 1, FWDL_WAIT_CNT, false, rtwdev); + 1, FWDL_WAIT_CNT, false, rtwdev, type); if (ret) { switch (val) { case RTW89_FWDL_CHECKSUM_FAIL: @@ -78,6 +85,7 @@ int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) return -EINVAL; default: + rtw89_err(rtwdev, "fw unexpected status %d\n", val); return -EBUSY; } } @@ -390,9 +398,9 @@ int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, static int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, const struct rtw89_fw_element_hdr *elm, - const void *data) + const union rtw89_fw_element_arg arg) { - enum rtw89_fw_type type = (enum rtw89_fw_type)data; + enum rtw89_fw_type type = arg.fw_type; struct rtw89_fw_suit *fw_suit; fw_suit = rtw89_fw_suit_get(rtwdev, type); @@ -548,7 +556,7 @@ normal_done: static int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, const struct rtw89_fw_element_hdr *elm, - const void *data) + const union rtw89_fw_element_arg arg) { struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; struct rtw89_phy_table *tbl; @@ -572,7 +580,7 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, case RTW89_FW_ELEMENT_ID_RADIO_B: case RTW89_FW_ELEMENT_ID_RADIO_C: case RTW89_FW_ELEMENT_ID_RADIO_D: - rf_path = (enum rtw89_rf_path)data; + rf_path = arg.rf_path; idx = elm->u.reg2.idx; elm_info->rf_radio[idx] = tbl; @@ -607,29 +615,101 @@ out: return -ENOMEM; } -struct rtw89_fw_element_handler { - int (*fn)(struct rtw89_dev *rtwdev, - const struct rtw89_fw_element_hdr *elm, const void *data); - const void *data; - const char *name; -}; +static +int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg) +{ + const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; + const unsigned long offset = arg.offset; + struct rtw89_efuse *efuse = &rtwdev->efuse; + struct rtw89_txpwr_conf *conf; + + if 
(!rtwdev->rfe_data) {
+		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
+		if (!rtwdev->rfe_data)
+			return -ENOMEM;
+	}
+
+	conf = (void *)rtwdev->rfe_data + offset;
+
+	/* if multiple entries match, the last one takes effect */
+	if (txpwr_elm->rfe_type == efuse->rfe_type)
+		goto setup;
+
+	/* if none matches, accept the default */
+	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
+	    (!rtw89_txpwr_conf_valid(conf) ||
+	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
+		goto setup;
+
+	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
+		    elm->id, txpwr_elm->rfe_type);
+	return 0;
+
+setup:
+	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
+		    elm->id, txpwr_elm->rfe_type);
+
+	conf->rfe_type = txpwr_elm->rfe_type;
+	conf->ent_sz = txpwr_elm->ent_sz;
+	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
+	conf->data = txpwr_elm->content;
+	return 0;
+}
 
 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
 	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
-					(const void *)RTW89_FW_BBMCU0, NULL},
+					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
 	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
-					(const void *)RTW89_FW_BBMCU1, NULL},
-	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, NULL, "BB"},
-	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, NULL, NULL},
+					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
+	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
+	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
 	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
-					 (const void *)RF_PATH_A, "radio A"},
+					 { .rf_path = RF_PATH_A }, "radio A"},
 	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
-					 (const void *)RF_PATH_B, NULL},
+					 { .rf_path = RF_PATH_B }, NULL},
 	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
-					 (const void *)RF_PATH_C, NULL},
+					 { .rf_path = RF_PATH_C }, NULL},
 	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
-					 (const void *)RF_PATH_D, NULL},
-	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, NULL, "NCTL"},
+					 { .rf_path = RF_PATH_D }, NULL},
+	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
+	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
+	},
+	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
+		rtw89_fw_recognize_txpwr_from_elm,
+		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
+	},
+ [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, + }, }; int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) @@ -669,7 +749,7 @@ int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) if (!handler->fn) goto next; - ret = handler->fn(rtwdev, hdr, handler->data); + ret = handler->fn(rtwdev, hdr, handler->arg); if (ret) return ret; @@ -768,7 +848,7 @@ fail: static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) { - u8 val; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; int ret; ret = __rtw89_fw_download_hdr(rtwdev, fw, len); @@ -777,9 +857,7 @@ static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len return ret; } - ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, - 1, FWDL_WAIT_CNT, false, - rtwdev, R_AX_WCPU_FW_CTRL); + ret = mac->fwdl_check_path_ready(rtwdev, false); if (ret) { rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); return ret; @@ -831,10 +909,27 @@ fail: return ret; } -static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, +static enum rtw89_fwdl_check_type +rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, + const struct rtw89_fw_suit *fw_suit) +{ + switch (fw_suit->type) { + case RTW89_FW_BBMCU0: + return RTW89_FWDL_CHECK_BB0_FWDL_DONE; + case RTW89_FW_BBMCU1: + return RTW89_FWDL_CHECK_BB1_FWDL_DONE; + default: + return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; + } +} + +static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, + const struct rtw89_fw_suit *fw_suit, struct rtw89_fw_bin_info *info) { struct rtw89_fw_hdr_section_info *section_info = info->section_info; + const struct rtw89_chip_info *chip = rtwdev->chip; + enum rtw89_fwdl_check_type chk_type; u8 section_num = info->section_num; int ret; @@ -845,11 +940,14 @@ static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, section_info++; } - mdelay(5); + if (chip->chip_gen == RTW89_CHIP_AX) + return 0; - ret = rtw89_fw_check_rdy(rtwdev); + chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); + ret = rtw89_fw_check_rdy(rtwdev, chk_type); if (ret) { - rtw89_warn(rtwdev, "download firmware fail\n"); + rtw89_warn(rtwdev, "failed to download firmware type %u\n", + fw_suit->type); return ret; } @@ -887,44 +985,66 @@ static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) rtw89_fw_prog_cnt_dump(rtwdev); } -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, + struct rtw89_fw_suit *fw_suit) { - struct rtw89_fw_info *fw_info = &rtwdev->fw; - struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct rtw89_fw_bin_info info; - u8 val; int ret; - rtw89_mac_disable_cpu(rtwdev); - ret = rtw89_mac_enable_cpu(rtwdev, 0, true); - if (ret) - return ret; - ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); if (ret) { rtw89_err(rtwdev, "parse fw header fail\n"); - goto fwdl_err; + return ret; } - ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, - 1, FWDL_WAIT_CNT, false, - rtwdev, R_AX_WCPU_FW_CTRL); + if (rtwdev->chip->chip_id == RTL8922A && + (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) + rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); + + ret = mac->fwdl_check_path_ready(rtwdev, true); if (ret) { rtw89_err(rtwdev, "[ERR]H2C path ready\n"); - goto 
fwdl_err; + return ret; } ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len - info.dynamic_hdr_len); - if (ret) { - ret = -EBUSY; - goto fwdl_err; - } + if (ret) + return ret; - ret = rtw89_fw_download_main(rtwdev, fw_suit->data, &info); - if (ret) { - ret = -EBUSY; + ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); + if (ret) + return ret; + + return 0; +} + +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + struct rtw89_fw_info *fw_info = &rtwdev->fw; + struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; + int ret; + int i; + + mac->disable_cpu(rtwdev); + ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); + if (ret) + return ret; + + ret = rtw89_fw_download_suit(rtwdev, fw_suit); + if (ret) goto fwdl_err; + + for (i = 0; i < bbmcu_nr && include_bb; i++) { + fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); + + ret = rtw89_fw_download_suit(rtwdev, fw_suit); + if (ret) + goto fwdl_err; } fw_info->h2c_seq = 0; @@ -934,6 +1054,14 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; + mdelay(5); + + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); + if (ret) { + rtw89_warn(rtwdev, "download firmware fail\n"); + return ret; + } + return ret; fwdl_err: @@ -3178,11 +3306,11 @@ fail: int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; struct rtw89_fw_h2c_rf_get_mccch *mccch; struct sk_buff *skb; int ret; + u8 idx; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); if (!skb) { @@ -3192,12 +3320,13 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) skb_put(skb, sizeof(*mccch)); mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; + idx = rfk_mcc->table_idx; mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); - mccch->current_channel = cpu_to_le32(chan->channel); - mccch->current_band_type = cpu_to_le32(chan->band_type); + mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); + mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, @@ -3889,6 +4018,8 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rx_fltr); + + rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); } void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, @@ -3912,6 +4043,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, rtw89_core_scan_complete(rtwdev, vif, true); ieee80211_scan_completed(rtwdev->hw, &info); ieee80211_wake_queues(rtwdev->hw); + rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); rtw89_release_pkt_list(rtwdev); rtwvif = (struct rtw89_vif *)vif->drv_priv; @@ -3920,7 +4052,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, scan_info->last_chan_idx = 0; scan_info->scanning_vif = NULL; - rtw89_set_channel(rtwdev); + rtw89_chanctx_proceed(rtwdev); } void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 
struct ieee80211_vif *vif) @@ -3929,6 +4061,19 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) rtw89_hw_scan_complete(rtwdev, vif, true); } +static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) +{ + struct rtw89_vif *rtwvif; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + /* This variable implies connected or during attempt to connect */ + if (!is_zero_ether_addr(rtwvif->bssid)) + return true; + } + + return false; +} + int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool enable) { @@ -3941,8 +4086,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, if (!rtwvif) return -EINVAL; - /* This variable implies connected or during attempt to connect */ - connected = !is_zero_ether_addr(rtwvif->bssid); + connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); opt.enable = enable; opt.target_ch_mode = connected; if (enable) { @@ -4520,7 +4664,7 @@ int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, } #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 -int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, +int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, u8 *bitmap) { struct rtw89_wait_info *wait = &rtwdev->mcc.wait; @@ -4623,3 +4767,454 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); } + +static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) +{ + static const u8 zeros[U8_MAX] = {}; + + return memcmp(ext_ptr, zeros, ext_len) == 0; +} + +#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ +({ \ + u8 __var_sz = sizeof(*(e)); \ + bool __accept; \ + if (__var_sz >= (ent_sz)) \ + __accept = true; \ + else \ + __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ + (ent_sz) - __var_sz);\ + __accept; \ +}) + +static bool +fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) + return false; + + switch (e->rs) { + case RTW89_RS_CCK: + if (e->shf + e->len > RTW89_RATE_CCK_NUM) + return false; + break; + case RTW89_RS_OFDM: + if (e->shf + e->len > RTW89_RATE_OFDM_NUM) + return false; + break; + case RTW89_RS_MCS: + if (e->shf + e->len > __RTW89_RATE_MCS_NUM || + e->nss >= RTW89_NSS_NUM || + e->ofdma >= RTW89_OFDMA_NUM) + return false; + break; + case RTW89_RS_HEDCM: + if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || + e->nss >= RTW89_NSS_HEDCM_NUM || + e->ofdma >= RTW89_OFDMA_NUM) + return false; + break; + case RTW89_RS_OFFSET: + if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) + return false; + break; + default: + return false; + } + + return true; +} + +static +void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl) +{ + const struct rtw89_txpwr_conf *conf = tbl->data; + struct rtw89_fw_txpwr_byrate_entry entry = {}; + struct rtw89_txpwr_byrate *byr_head; + struct rtw89_rate_desc desc = {}; + const void *cursor; + u32 data; + s8 *byr; + int i; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) + continue; + + byr_head = &rtwdev->byr[entry.band][entry.bw]; + data = le32_to_cpu(entry.data); + desc.ofdma = entry.ofdma; + desc.nss = entry.nss; + desc.rs = 
entry.rs; + + for (i = 0; i < entry.len; i++, data >>= 8) { + desc.idx = entry.shf + i; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); + *byr = data & 0xff; + } + } +} + +static bool +fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_2G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_2G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_5G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_5G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_6G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) + return false; + if (e->ch_idx >= RTW89_6G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.reg_6ghz_power][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + 
return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_2G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_5G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) + return false; + if (e->ch_idx >= RTW89_6G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM) + return false; + if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_tx_shape_lmt_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; + } +} + +static bool +fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM) + return false; + if (e->regd >= 
RTW89_REGD_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.band][entry.regd] = entry.v; + } +} + +const struct rtw89_rfe_parms * +rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, + const struct rtw89_rfe_parms *init) +{ + struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; + struct rtw89_rfe_parms *parms; + + if (!rfe_data) + return init; + + parms = &rfe_data->rfe_parms; + if (init) + *parms = *init; + + if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { + rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; + rfe_data->byrate.tbl.size = 0; /* don't care here */ + rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; + parms->byr_tbl = &rfe_data->byrate.tbl; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); + parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); + parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); + parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); + parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); + parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); + parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { + rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); + parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { + rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); + parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; + } + + return parms; +} diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 775f4e8fbd..d4db9ab0b5 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2931,6 +2931,11 @@ static inline void RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(void *cmd, u32 val) le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(23, 16)); } +enum rtw89_fw_mcc_old_group_actions { + RTW89_FW_MCC_OLD_GROUP_ACT_NONE = 0, + RTW89_FW_MCC_OLD_GROUP_ACT_REPLACE = 1, +}; + struct rtw89_fw_mcc_start_req { u32 group: 2; u32 btc_in_group: 1; @@ -3412,10 +3417,46 @@ enum rtw89_fw_element_id { RTW89_FW_ELEMENT_ID_RADIO_C = 6, RTW89_FW_ELEMENT_ID_RADIO_D = 7, RTW89_FW_ELEMENT_ID_RF_NCTL = 8, + RTW89_FW_ELEMENT_ID_TXPWR_BYRATE = 9, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ = 10, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ = 11, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ = 12, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ = 13, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ = 14, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ = 15, + RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT = 16, + 
RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17, RTW89_FW_ELEMENT_ID_NUM, }; +#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \ + (BIT(RTW89_FW_ELEMENT_ID_TXPWR_BYRATE) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT) | \ + BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU)) + +#define RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS (BIT(RTW89_FW_ELEMENT_ID_BBMCU0) | \ + BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \ + BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \ + BITS_OF_RTW89_TXPWR_FW_ELEMENTS) + +struct __rtw89_fw_txpwr_element { + u8 rsvd0; + u8 rsvd1; + u8 rfe_type; + u8 ent_sz; + __le32 num_ents; + u8 content[]; +} __packed; + struct rtw89_fw_element_hdr { __le32 id; /* enum rtw89_fw_element_id */ __le32 size; /* exclude header size */ @@ -3436,6 +3477,7 @@ struct rtw89_fw_element_hdr { __le32 data; } __packed regs[]; } __packed reg2; + struct __rtw89_fw_txpwr_element txpwr; } __packed u; } __packed; @@ -3618,7 +3660,9 @@ struct rtw89_fw_h2c_rf_get_mccch { #define RTW89_FW_BACKTRACE_MAX_SIZE 512 /* 8 * 64 (entries) */ #define RTW89_FW_BACKTRACE_KEY 0xBACEBACE -int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev); +#define FWDL_WAIT_CNT 400000 + +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type); int rtw89_fw_recognize(struct rtw89_dev *rtwdev); int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev); const struct firmware * @@ -3626,7 +3670,8 @@ rtw89_early_fw_feature_recognize(struct device *device, const struct rtw89_chip_info *chip, struct rtw89_fw_info *early_fw, int *used_fw_format); -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type); +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb); void rtw89_load_firmware_work(struct work_struct *work); void rtw89_unload_firmware(struct rtw89_dev *rtwdev); int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev); @@ -3755,7 +3800,7 @@ int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group); int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, const struct rtw89_fw_mcc_tsf_req *req, struct rtw89_mac_mcc_tsf_rpt *rpt); -int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, +int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, u8 *bitmap); int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, u8 target, u8 offset); @@ -3770,4 +3815,97 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev) rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev); } +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_byrate_entry { + u8 band; + u8 nss; + u8 rs; + u8 shf; + u8 len; + __le32 data; + u8 bw; + u8 ofdma; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_2ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_5ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_6ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 
regd; + u8 reg_6ghz_power; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_2ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_5ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_6ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 reg_6ghz_power; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_tx_shape_lmt_entry { + u8 band; + u8 tx_shape_rs; + u8 regd; + u8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_tx_shape_lmt_ru_entry { + u8 band; + u8 regd; + u8 v; +} __packed; + +const struct rtw89_rfe_parms * +rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, + const struct rtw89_rfe_parms *init); + #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index fab9f5004a..d0c7de4e80 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -3452,7 +3452,7 @@ static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev) rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL); } -void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) +static void rtw89_mac_disable_cpu_ax(struct rtw89_dev *rtwdev) { clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); @@ -3467,7 +3467,8 @@ void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); } -int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) +static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason, + bool dlfw, bool include_bb) { u32 val; int ret; @@ -3505,7 +3506,7 @@ int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) if (!dlfw) { mdelay(5); - ret = rtw89_fw_check_rdy(rtwdev); + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); if (ret) return ret; } @@ -3592,7 +3593,7 @@ int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) } EXPORT_SYMBOL(rtw89_mac_disable_bb_rf); -int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb) { int ret; @@ -3606,6 +3607,12 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); + if (include_bb) { + rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_0); + if (rtwdev->dbcc_en) + rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_1); + } + ret = rtw89_mac_dmac_pre_init(rtwdev); if (ret) return ret; @@ -3616,7 +3623,7 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) return ret; } - ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); + ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL, include_bb); if (ret) return ret; @@ -3625,9 +3632,11 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) int rtw89_mac_init(struct rtw89_dev *rtwdev) { + const struct rtw89_chip_info *chip = rtwdev->chip; + bool include_bb = !!chip->bbmcu_nr; int ret; - ret = rtw89_mac_partial_init(rtwdev); + ret = rtw89_mac_partial_init(rtwdev, include_bb); if (ret) goto fail; @@ -3712,7 +3721,7 @@ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) return 0; } -static const struct rtw89_port_reg rtw_port_base = { +static const struct rtw89_port_reg 
rtw89_port_base_ax = { .port_cfg = R_AX_PORT_CFG_P0, .tbtt_prohib = R_AX_TBTT_PROHIB_P0, .bcn_area = R_AX_BCN_AREA_P0, @@ -3727,9 +3736,61 @@ static const struct rtw89_port_reg rtw_port_base = { .tbtt_shift = R_AX_TBTT_SHIFT_P0, .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, .tsftr_l = R_AX_TSFTR_LOW_P0, - .tsftr_h = R_AX_TSFTR_HIGH_P0 + .tsftr_h = R_AX_TSFTR_HIGH_P0, + .md_tsft = R_AX_MD_TSFT_STMP_CTL, + .bss_color = R_AX_PTCL_BSS_COLOR_0, + .mbssid = R_AX_MBSSID_CTRL, + .mbssid_drop = R_AX_MBSSID_DROP_0, + .tsf_sync = R_AX_PORT0_TSF_SYNC, + .hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, + R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, + R_AX_PORT_HGQ_WINDOW_CFG + 3}, }; +static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, u8 type) +{ + u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port); + u32 reg_info, reg_ctrl; + u32 val; + int ret; + + reg_info = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG_INFO, rtwvif->mac_idx); + reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG, rtwvif->mac_idx); + + rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type); + rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN); + fsleep(100); + + ret = read_poll_timeout(rtw89_read32_mask, val, val == 0, 1000, 100000, + true, rtwdev, reg_info, mask); + if (ret) + rtw89_warn(rtwdev, "Polling beacon packet empty fail\n"); +} + +static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; + + rtw89_write32_set(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port)); + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1); + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0); + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0); + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 2); + rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK, 1); + rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 1); + rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); + + rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM0); + if (rtwvif->port == RTW89_PORT_0) + rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1); + + rtw89_write32_clr(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port)); + rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN); + fsleep(2); +} + #define BCN_INTERVAL 100 #define BCN_ERLY_DEF 160 #define BCN_SETUP_DEF 2 @@ -3742,29 +3803,46 @@ static const struct rtw89_port_reg rtw_port_base = { static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_chip_info *chip = rtwdev->chip; + bool need_backup = false; + u32 backup_val; if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) return; - rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); - rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); - rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); - rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); + if (chip->chip_id 
== RTL8852A && rtwvif->port != RTW89_PORT_0) { + need_backup = true; + backup_val = rtw89_read32_port(rtwdev, rtwvif, p->tbtt_prohib); + } + + if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) + rtw89_mac_bcn_drop(rtwdev, rtwvif); - msleep(vif->bss_conf.beacon_int + 1); + if (chip->chip_id == RTL8852A) { + rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); + rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); + rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); + rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); + } + msleep(vif->bss_conf.beacon_int + 1); rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | B_AX_BRK_SETUP); rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST); rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); + + if (need_backup) + rtw89_write32_port(rtwdev, rtwvif, p->tbtt_prohib, backup_val); } static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (en) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); @@ -3775,7 +3853,8 @@ static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (en) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); @@ -3786,7 +3865,8 @@ static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, rtwvif->net_type); @@ -3795,7 +3875,8 @@ static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; @@ -3808,7 +3889,8 @@ static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; u32 bit = B_AX_RX_BSSID_FIT_EN; @@ -3822,7 +3904,8 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; @@ -3833,11 
+3916,10 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, } static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif) + struct rtw89_vif *rtwvif, bool en) { - const struct rtw89_port_reg *p = &rtw_port_base; - bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || - rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (en) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); @@ -3845,11 +3927,30 @@ static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); } +static void rtw89_mac_port_cfg_tx_sw_by_nettype(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || + rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; + + rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en); +} + +void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en) +{ + struct rtw89_vif *rtwvif; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) + rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif, en); +} + static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, @@ -3859,27 +3960,25 @@ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - static const u32 hiq_win_addr[RTW89_PORT_NUM] = { - R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, - R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, - R_AX_PORT_HGQ_WINDOW_CFG + 3, - }; u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 
16 : 0; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; - reg = rtw89_mac_reg_by_idx(rtwdev, hiq_win_addr[port], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif->mac_idx); rtw89_write8(rtwdev, reg, win); } static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; u32 addr; - addr = rtw89_mac_reg_by_idx(rtwdev, R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx); + addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif->mac_idx); rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, @@ -3889,7 +3988,8 @@ static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); @@ -3898,7 +3998,8 @@ static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); @@ -3907,7 +4008,8 @@ static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); @@ -3916,7 +4018,8 @@ static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); @@ -3925,6 +4028,8 @@ static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); static const u32 masks[RTW89_PORT_NUM] = { B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, @@ -3937,7 +4042,7 @@ static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, u8 bss_color; bss_color = vif->bss_conf.he_bss_color.color; - reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; + reg_base = port >= 4 ? 
p->bss_color + 4 : p->bss_color; reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); } @@ -3945,6 +4050,8 @@ static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; @@ -3952,7 +4059,7 @@ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, return; if (port == 0) { - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MBSSID_CTRL, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif->mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); } } @@ -3960,11 +4067,13 @@ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; u32 val; - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MBSSID_DROP_0, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif->mac_idx); val = rtw89_read32(rtwdev, reg); val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); if (port == 0) @@ -3975,7 +4084,8 @@ static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (enable) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, @@ -3988,7 +4098,8 @@ static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, BCN_ERLY_DEF); @@ -3997,7 +4108,8 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u16 val; if (rtwdev->chip->chip_id != RTL8852C) @@ -4019,10 +4131,12 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif_src, u16 offset_tu) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u32 val, reg; val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu); - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PORT0_TSF_SYNC + rtwvif->port * 4, + reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif->port * 4, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port); @@ -4137,7 +4251,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); - rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); + rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif); rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); 
rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif); rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif); @@ -4160,7 +4274,8 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u64 *tsf) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u32 tsf_low, tsf_high; int ret; @@ -4221,7 +4336,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - rtw89_mac_port_cfg_func_en(rtwdev, rtwvif, false); + rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); } int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) @@ -4298,8 +4413,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h, switch (reason) { case RTW89_SCAN_LEAVE_CH_NOTIFY: - if (rtw89_is_op_chan(rtwdev, band, chan)) + if (rtw89_is_op_chan(rtwdev, band, chan)) { + rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false); ieee80211_stop_queues(rtwdev->hw); + } return; case RTW89_SCAN_END_SCAN_NOTIFY: if (rtwvif && rtwvif->scan_req && @@ -4317,6 +4434,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h, if (rtw89_is_op_chan(rtwdev, band, chan)) { rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx, &rtwdev->scan_info.op_chan); + rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); ieee80211_wake_queues(rtwdev->hw); } else { rtw89_chan_create(&new, chan, chan, band, @@ -4479,6 +4597,7 @@ static void rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_TSF32_TOGGLE_CHANGE); } static void @@ -4733,21 +4852,22 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, handler(rtwdev, skb, len); } -bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, - u32 reg_base, u32 *cr) +static +bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr) { const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; enum rtw89_qta_mode mode = dle_mem->mode; u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); - if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { + if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) { rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", addr); goto error; } - if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR) + if (addr >= CMAC1_START_ADDR_AX && addr <= CMAC1_END_ADDR_AX) if (mode == RTW89_QTA_SCC) { rtw89_err(rtwdev, "[TXPWR] addr=0x%x but hw not enable\n", @@ -4764,7 +4884,6 @@ error: return false; } -EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr); int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) { @@ -4799,6 +4918,7 @@ void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) #define MAC_AX_LEN_TH_MAX 255 #define MAC_AX_TIME_TH_DEF 88 #define MAC_AX_LEN_TH_DEF 4080 + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct ieee80211_hw *hw = rtwdev->hw; u32 rts_threshold = hw->wiphy->rts_threshold; u32 time_th, len_th; @@ -4815,7 +4935,7 @@ void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_HT_0, 
mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_len_ht, mac_idx); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); } @@ -5153,6 +5273,9 @@ static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, { u32 reg; + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee standby_timer to %d\n", keep); reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); if (keep) { @@ -5166,14 +5289,14 @@ static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, } } -static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) +void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 reg; - u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | - B_AX_BFMEE_HE_NDPA_EN; + u32 mask = mac->bfee_ctrl.mask; rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->bfee_ctrl.addr, mac_idx); if (en) { set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); rtw89_write32_set(rtwdev, reg, mask); @@ -5183,7 +5306,7 @@ static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) } } -static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) +static int rtw89_mac_init_bfee_ax(struct rtw89_dev *rtwdev, u8 mac_idx) { u32 reg; u32 val32; @@ -5225,9 +5348,9 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) return 0; } -static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; u8 mac_idx = rtwvif->mac_idx; @@ -5283,9 +5406,9 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, return 0; } -static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); @@ -5322,17 +5445,18 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, return 0; } -void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static void rtw89_mac_bf_assoc_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; if (rtw89_sta_has_beamformer_cap(sta)) { rtw89_debug(rtwdev, RTW89_DBG_BF, "initialize bfee for new association\n"); - rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); - rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); - rtw89_mac_csi_rrsc(rtwdev, vif, sta); + rtw89_mac_init_bfee_ax(rtwdev, rtwvif->mac_idx); + rtw89_mac_set_csi_para_reg_ax(rtwdev, vif, sta); + rtw89_mac_csi_rrsc_ax(rtwdev, vif, sta); } } @@ -5551,8 +5675,9 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 mac_idx = rtwvif->mac_idx; - u16 set = B_AX_MUEDCA_EN_0 | 
B_AX_SET_MUEDCATIMER_TF_0; + u16 set = mac->muedca_ctrl.mask; u32 reg; u32 ret; @@ -5560,7 +5685,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, if (ret) return ret; - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MUEDCA_EN, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->muedca_ctrl.addr, mac_idx); if (en) rtw89_write16_set(rtwdev, reg, set); else @@ -5684,11 +5809,51 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev, return ret; } +static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) +{ + u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); + + return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); +} + +static +int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev, + bool h2c_or_fwdl) +{ + u8 check = h2c_or_fwdl ? B_AX_H2C_PATH_RDY : B_AX_FWDL_PATH_RDY; + u8 val; + + return read_poll_timeout_atomic(rtw89_read8, val, val & check, + 1, FWDL_WAIT_CNT, false, + rtwdev, R_AX_WCPU_FW_CTRL); +} + const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET, .filter_model_addr = R_AX_FILTER_MODEL_ADDR, .indir_access_addr = R_AX_INDIR_ACCESS_ENTRY, .mem_base_addrs = rtw89_mac_mem_base_addrs_ax, .rx_fltr = R_AX_RX_FLTR_OPT, + .port_base = &rtw89_port_base_ax, + .agg_len_ht = R_AX_AGG_LEN_HT_0, + + .muedca_ctrl = { + .addr = R_AX_MUEDCA_EN, + .mask = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0, + }, + .bfee_ctrl = { + .addr = R_AX_BFMEE_RESP_OPTION, + .mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | + B_AX_BFMEE_HE_NDPA_EN, + }, + + .bf_assoc = rtw89_mac_bf_assoc_ax, + + .disable_cpu = rtw89_mac_disable_cpu_ax, + .fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax, + .fwdl_get_status = rtw89_fw_get_rdy_ax, + .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax, + + .get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax, }; EXPORT_SYMBOL(rtw89_mac_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 7cf34137c0..f9fef678f3 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -858,6 +858,24 @@ struct rtw89_mac_gen_def { u32 indir_access_addr; const u32 *mem_base_addrs; u32 rx_fltr; + const struct rtw89_port_reg *port_base; + u32 agg_len_ht; + + struct rtw89_reg_def muedca_ctrl; + struct rtw89_reg_def bfee_ctrl; + + void (*bf_assoc)(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + + void (*disable_cpu)(struct rtw89_dev *rtwdev); + int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason, + bool dlfw, bool include_bb); + u8 (*fwdl_get_status)(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type); + int (*fwdl_check_path_ready)(struct rtw89_dev *rtwdev, bool h2c_or_fwdl); + + bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr); }; extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax; @@ -957,7 +975,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, } void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev); -int rtw89_mac_partial_init(struct rtw89_dev *rtwdev); +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb); int rtw89_mac_init(struct rtw89_dev *rtwdev); int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band, enum rtw89_mac_hwmod_sel sel); @@ -974,9 +992,8 @@ int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_mac_stop_ap(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); +void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en); int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); -void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev); -int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw); int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev); int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev); @@ -1023,13 +1040,19 @@ u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev); bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev); int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl); int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl); -bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, - u32 reg_base, u32 *cr); void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter); void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev); + +static inline void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + + if (mac->bf_assoc) + mac->bf_assoc(rtwdev, vif, sta); +} + void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, @@ -1037,6 +1060,7 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif * void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, bool disconnect); void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev); +void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en); int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, @@ -1045,6 +1069,9 @@ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause); static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) { + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + if (!test_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags)) return; @@ -1055,9 +1082,10 @@ static inline int rtw89_mac_txpwr_read32(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 *val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; *val = rtw89_read32(rtwdev, cr); @@ -1068,9 +1096,10 @@ static inline int rtw89_mac_txpwr_write32(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; rtw89_write32(rtwdev, cr, val); @@ -1081,9 +1110,10 @@ static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 mask, u32 val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; rtw89_write32_mask(rtwdev, cr, mask, val); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 
5e48618706..b7ceaf5595 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -145,6 +145,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtwvif->mac_idx = RTW89_MAC_0; rtwvif->phy_idx = RTW89_PHY_0; rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0; + rtwvif->chanctx_assigned = false; rtwvif->hit_rule = 0; rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; ether_addr_copy(rtwvif->mac_addr, vif->addr); @@ -327,11 +328,14 @@ static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev, rtw89_fw_h2c_set_edca(rtwdev, rtwvif, ac_to_fw_idx[ac], val); } -static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS] = { - [IEEE80211_AC_VO] = R_AX_MUEDCA_VO_PARAM_0, - [IEEE80211_AC_VI] = R_AX_MUEDCA_VI_PARAM_0, - [IEEE80211_AC_BE] = R_AX_MUEDCA_BE_PARAM_0, - [IEEE80211_AC_BK] = R_AX_MUEDCA_BK_PARAM_0, +#define R_MUEDCA_ACS_PARAM(acs) {R_AX_MUEDCA_ ## acs ## _PARAM_0, \ + R_BE_MUEDCA_ ## acs ## _PARAM_0} + +static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS][RTW89_CHIP_GEN_NUM] = { + [IEEE80211_AC_VO] = R_MUEDCA_ACS_PARAM(VO), + [IEEE80211_AC_VI] = R_MUEDCA_ACS_PARAM(VI), + [IEEE80211_AC_BE] = R_MUEDCA_ACS_PARAM(BE), + [IEEE80211_AC_BK] = R_MUEDCA_ACS_PARAM(BK), }; static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, @@ -339,6 +343,7 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, { struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; struct ieee80211_he_mu_edca_param_ac_rec *mu_edca; + int gen = rtwdev->chip->chip_gen; u8 aifs, aifsn; u16 timer_32us; u32 reg; @@ -355,7 +360,7 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs); - reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac][gen], rtwvif->mac_idx); rtw89_write32(rtwdev, reg, val); rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true); @@ -445,7 +450,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, rtw89_mac_bf_set_gid_table(rtwdev, vif, conf); if (changed & BSS_CHANGED_P2P_PS) - rtw89_process_p2p_ps(rtwdev, vif); + rtw89_core_update_p2p_ps(rtwdev, vif); if (changed & BSS_CHANGED_CQM) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); @@ -472,6 +477,9 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, return -EOPNOTSUPP; } + if (rtwdev->scanning) + rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); + ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid); rtw89_cam_bssid_changed(rtwdev, rtwvif); rtw89_mac_port_update(rtwdev, rtwvif); diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 9a63fb35e8..3278f241db 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -2,6 +2,8 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "debug.h" +#include "fw.h" #include "mac.h" #include "reg.h" @@ -28,11 +30,406 @@ static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE, }; +static const struct rtw89_port_reg rtw89_port_base_be = { + .port_cfg = R_BE_PORT_CFG_P0, + .tbtt_prohib = R_BE_TBTT_PROHIB_P0, + .bcn_area = R_BE_BCN_AREA_P0, + .bcn_early = R_BE_BCNERLYINT_CFG_P0, + .tbtt_early = R_BE_TBTTERLYINT_CFG_P0, + .tbtt_agg = R_BE_TBTT_AGG_P0, + .bcn_space = 
R_BE_BCN_SPACE_CFG_P0, + .bcn_forcetx = R_BE_BCN_FORCETX_P0, + .bcn_err_cnt = R_BE_BCN_ERR_CNT_P0, + .bcn_err_flag = R_BE_BCN_ERR_FLAG_P0, + .dtim_ctrl = R_BE_DTIM_CTRL_P0, + .tbtt_shift = R_BE_TBTT_SHIFT_P0, + .bcn_cnt_tmr = R_BE_BCN_CNT_TMR_P0, + .tsftr_l = R_BE_TSFTR_LOW_P0, + .tsftr_h = R_BE_TSFTR_HIGH_P0, + .md_tsft = R_BE_WMTX_MOREDATA_TSFT_STMP_CTL, + .bss_color = R_BE_PTCL_BSS_COLOR_0, + .mbssid = R_BE_MBSSID_CTRL, + .mbssid_drop = R_BE_MBSSID_DROP_0, + .tsf_sync = R_BE_PORT_0_TSF_SYNC, + .hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG, + R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2, + R_BE_PORT_HGQ_WINDOW_CFG + 3}, +}; + +static void rtw89_mac_disable_cpu_be(struct rtw89_dev *rtwdev) +{ + u32 val32; + + clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); + + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_HOLD_AFTER_RESET); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + + val32 = rtw89_read32(rtwdev, R_BE_WCPU_FW_CTRL); + val32 &= B_BE_RUN_ENV_MASK; + rtw89_write32(rtwdev, R_BE_WCPU_FW_CTRL, val32); + + rtw89_write32_set(rtwdev, R_BE_DCPU_PLATFORM_ENABLE, B_BE_DCPU_PLATFORM_EN); + + rtw89_write32(rtwdev, R_BE_UDM0, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H, 0); + rtw89_write32(rtwdev, R_BE_UDM2, 0); +} + +static void set_cpu_en(struct rtw89_dev *rtwdev, bool include_bb) +{ + u32 set = B_BE_WLANCPU_FWDL_EN; + + if (include_bb) + set |= B_BE_BBMCU0_FWDL_EN; + + rtw89_write32_set(rtwdev, R_BE_WCPU_FW_CTRL, set); +} + +static int wcpu_on(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) +{ + u32 val32; + int ret; + + rtw89_write32_set(rtwdev, R_BE_UDM0, B_BE_UDM0_DBG_MODE_CTRL); + + val32 = rtw89_read32(rtwdev, R_BE_HALT_C2H); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_HALT_C2H = 0x%x\n", __func__, val32); + } + val32 = rtw89_read32(rtwdev, R_BE_UDM1); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_UDM1 = 0x%x\n", __func__, val32); + } + val32 = rtw89_read32(rtwdev, R_BE_UDM2); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_UDM2 = 0x%x\n", __func__, val32); + } + + rtw89_write32(rtwdev, R_BE_UDM1, 0); + rtw89_write32(rtwdev, R_BE_UDM2, 0); + rtw89_write32(rtwdev, R_BE_HALT_H2C, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H, 0); + rtw89_write32(rtwdev, R_BE_HALT_H2C_CTRL, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H_CTRL, 0); + + rtw89_write32_set(rtwdev, R_BE_SYS_CLK_CTRL, B_BE_CPU_CLK_EN); + rtw89_write32_clr(rtwdev, R_BE_SYS_CFG5, + B_BE_WDT_WAKE_PCIE_EN | B_BE_WDT_WAKE_USB_EN); + rtw89_write32_clr(rtwdev, R_BE_WCPU_FW_CTRL, + B_BE_WDT_PLT_RST_EN | B_BE_WCPU_ROM_CUT_GET); + + rtw89_write16_mask(rtwdev, R_BE_BOOT_REASON, B_BE_BOOT_REASON_MASK, boot_reason); + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_HOLD_AFTER_RESET); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + + if (!dlfw) { + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); + if (ret) + return ret; + } + + return 0; +} + +static int rtw89_mac_fwdl_enable_wcpu_be(struct rtw89_dev *rtwdev, + u8 boot_reason, bool dlfw, + bool include_bb) +{ + set_cpu_en(rtwdev, include_bb); + + return wcpu_on(rtwdev, boot_reason, dlfw); +} + +static const u8 fwdl_status_map[] = { 
+ [0] = RTW89_FWDL_INITIAL_STATE, + [1] = RTW89_FWDL_FWDL_ONGOING, + [4] = RTW89_FWDL_CHECKSUM_FAIL, + [5] = RTW89_FWDL_SECURITY_FAIL, + [6] = RTW89_FWDL_SECURITY_FAIL, + [7] = RTW89_FWDL_CV_NOT_MATCH, + [8] = RTW89_FWDL_RSVD0, + [2] = RTW89_FWDL_WCPU_FWDL_RDY, + [3] = RTW89_FWDL_WCPU_FW_INIT_RDY, + [9] = RTW89_FWDL_RSVD0, +}; + +static u8 fwdl_get_status_be(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) +{ + bool check_pass = false; + u32 val32; + u8 st; + + val32 = rtw89_read32(rtwdev, R_BE_WCPU_FW_CTRL); + + switch (type) { + case RTW89_FWDL_CHECK_WCPU_FWDL_DONE: + check_pass = !(val32 & B_BE_WLANCPU_FWDL_EN); + break; + case RTW89_FWDL_CHECK_DCPU_FWDL_DONE: + check_pass = !(val32 & B_BE_DATACPU_FWDL_EN); + break; + case RTW89_FWDL_CHECK_BB0_FWDL_DONE: + check_pass = !(val32 & B_BE_BBMCU0_FWDL_EN); + break; + case RTW89_FWDL_CHECK_BB1_FWDL_DONE: + check_pass = !(val32 & B_BE_BBMCU1_FWDL_EN); + break; + default: + break; + } + + if (check_pass) + return RTW89_FWDL_WCPU_FW_INIT_RDY; + + st = u32_get_bits(val32, B_BE_WCPU_FWDL_STATUS_MASK); + if (st < ARRAY_SIZE(fwdl_status_map)) + return fwdl_status_map[st]; + + return st; +} + +static int rtw89_fwdl_check_path_ready_be(struct rtw89_dev *rtwdev, + bool h2c_or_fwdl) +{ + u32 check = h2c_or_fwdl ? B_BE_H2C_PATH_RDY : B_BE_DLFW_PATH_RDY; + u32 val; + + return read_poll_timeout_atomic(rtw89_read32, val, val & check, + 1, 1000000, false, + rtwdev, R_BE_WCPU_FW_CTRL); +} + +static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr) +{ + const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; + enum rtw89_qta_mode mode = dle_mem->mode; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx, + RTW89_CMAC_SEL); + if (ret) { + if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) + return false; + + rtw89_err(rtwdev, "[TXPWR] check mac enable failed\n"); + return false; + } + + if (reg_base < R_BE_PWR_MODULE || reg_base > R_BE_CMAC_FUNC_EN_C1) { + rtw89_err(rtwdev, "[TXPWR] reg_base=0x%x exceed txpwr cr\n", + reg_base); + return false; + } + + *cr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); + + if (*cr >= CMAC1_START_ADDR_BE && *cr <= CMAC1_END_ADDR_BE) { + if (mode == RTW89_QTA_SCC) { + rtw89_err(rtwdev, + "[TXPWR] addr=0x%x but hw not enable\n", + *cr); + return false; + } + } + + return true; +} + +static int rtw89_mac_init_bfee_be(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + u32 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL | + B_BE_BFMEE_USE_NSTS | + B_BE_BFMEE_CSI_GID_SEL | + B_BE_BFMEE_CSI_FORCE_RETE_EN); + rtw89_write32_mask(rtwdev, reg, B_BE_BFMEE_CSI_RSC_MASK, CSI_RX_BW_CFG); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CSIRPT_OPTION, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_CSIPRT_VHTSU_AID_EN | + B_BE_CSIPRT_HESU_AID_EN | + B_BE_CSIPRT_EHTSU_AID_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RRSC, mac_idx); + rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP_BE); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_BE_BFMEE_BE_CSI_RRSC_BITMAP_MASK, + CSI_RRSC_BITMAP_CFG); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RATE, mac_idx); + val = u32_encode_bits(CSI_INIT_RATE_HT, 
B_BE_BFMEE_HT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_VHT, B_BE_BFMEE_VHT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_HE, B_BE_BFMEE_HE_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_EHT, B_BE_BFMEE_EHT_CSI_RATE_MASK); + + rtw89_write32(rtwdev, reg, val); + + return 0; +} + +static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; + u8 mac_idx = rtwvif->mac_idx; + u8 port_sel = rtwvif->port; + u8 sound_dim = 3, t; + u8 *phy_cap; + u32 reg; + u16 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info; + + if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || + (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { + ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); + stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); + t = u8_get_bits(phy_cap[5], + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK); + sound_dim = min(sound_dim, t); + } + + if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { + ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); + t = u32_get_bits(sta->deflink.vht_cap.cap, + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); + sound_dim = min(sound_dim, t); + } + + nc = min(nc, sound_dim); + nr = min(nr, sound_dim); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL); + + val = u16_encode_bits(nc, B_BE_BFMEE_CSIINFO0_NC_MASK) | + u16_encode_bits(nr, B_BE_BFMEE_CSIINFO0_NR_MASK) | + u16_encode_bits(ng, B_BE_BFMEE_CSIINFO0_NG_MASK) | + u16_encode_bits(cb, B_BE_BFMEE_CSIINFO0_CB_MASK) | + u16_encode_bits(cs, B_BE_BFMEE_CSIINFO0_CS_MASK) | + u16_encode_bits(ldpc_en, B_BE_BFMEE_CSIINFO0_LDPC_EN) | + u16_encode_bits(stbc_en, B_BE_BFMEE_CSIINFO0_STBC_EN); + + if (port_sel == 0) + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, + mac_idx); + else + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_1, + mac_idx); + + rtw89_write16(rtwdev, reg, val); + + return 0; +} + +static int rtw89_mac_csi_rrsc_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); + u8 mac_idx = rtwvif->mac_idx; + int ret; + u32 reg; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (sta->deflink.he_cap.has_he) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); + } + if (sta->deflink.vht_cap.vht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); + } + if (sta->deflink.ht_cap.ht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); + } + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL); + rtw89_write32_clr(rtwdev, reg, 
B_BE_BFMEE_CSI_FORCE_RETE_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RRSC, mac_idx); + rtw89_write32(rtwdev, reg, rrsc); + + return 0; +} + +static void rtw89_mac_bf_assoc_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + if (rtw89_sta_has_beamformer_cap(sta)) { + rtw89_debug(rtwdev, RTW89_DBG_BF, + "initialize bfee for new association\n"); + rtw89_mac_init_bfee_be(rtwdev, rtwvif->mac_idx); + rtw89_mac_set_csi_para_reg_be(rtwdev, vif, sta); + rtw89_mac_csi_rrsc_be(rtwdev, vif, sta); + } +} + const struct rtw89_mac_gen_def rtw89_mac_gen_be = { .band1_offset = RTW89_MAC_BE_BAND_REG_OFFSET, .filter_model_addr = R_BE_FILTER_MODEL_ADDR, .indir_access_addr = R_BE_INDIR_ACCESS_ENTRY, .mem_base_addrs = rtw89_mac_mem_base_addrs_be, .rx_fltr = R_BE_RX_FLTR_OPT, + .port_base = &rtw89_port_base_be, + .agg_len_ht = R_BE_AGG_LEN_HT_0, + + .muedca_ctrl = { + .addr = R_BE_MUEDCA_EN, + .mask = B_BE_MUEDCA_EN_0 | B_BE_SET_MUEDCATIMER_TF_0, + }, + .bfee_ctrl = { + .addr = R_BE_BFMEE_RESP_OPTION, + .mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN | + B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN, + }, + + .bf_assoc = rtw89_mac_bf_assoc_be, + + .disable_cpu = rtw89_mac_disable_cpu_be, + .fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be, + .fwdl_get_status = fwdl_get_status_be, + .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_be, + + .get_txpwr_cr = rtw89_mac_get_txpwr_cr_be, }; EXPORT_SYMBOL(rtw89_mac_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 3a4bfc4414..14ddb0d39e 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1196,7 +1196,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; - struct rtw89_txwd_info *txwd_info; struct rtw89_pci_tx_wp_info *txwp_info; void *txaddr_info_addr; struct pci_dev *pdev = rtwpci->pdev; @@ -1222,7 +1221,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, txwp_len = sizeof(*txwp_info); txwd_len = chip->txwd_body_size; - txwd_len += en_wd_info ? sizeof(*txwd_info) : 0; + txwd_len += en_wd_info ? 
chip->txwd_info_size : 0; txwp_info = txwd->vaddr + txwd_len; txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 7139146cb3..17ccc9efed 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -88,6 +88,55 @@ static u64 get_he_ra_mask(struct ieee80211_sta *sta) return get_mcs_ra_mask(mcs_map, 11, 2); } +static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss) +{ + u64 nss_mcs_shift; + u64 nss_mcs_val; + u64 mask = 0; + int i, j; + u8 nss; + + for (i = 0; i < n_nss; i++) { + nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX); + if (!nss) + continue; + + nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0); + + for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16) + mask |= nss_mcs_val << nss_mcs_shift; + } + + return mask; +} + +static u64 get_eht_ra_mask(struct ieee80211_sta *sta) +{ + struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; + struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz; + struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_320: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_160: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_80: + default: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_20: + mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz; + /* MCS 7, 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4); + } +} + #define RA_FLOOR_TABLE_SIZE 7 #define RA_FLOOR_UP_GAP 3 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi, @@ -194,6 +243,9 @@ rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES, static const u64 rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES, RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES}; +static const u64 +rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES, + RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES}; static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, @@ -255,7 +307,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, memset(ra, 0, sizeof(*ra)); /* Set the ra mask from sta's capability */ - if (sta->deflink.he_cap.has_he) { + if (sta->deflink.eht_cap.has_eht) { + mode |= RTW89_RA_MODE_EHT; + ra_mask |= get_eht_ra_mask(sta); + high_rate_masks = rtw89_ra_mask_eht_rates; + } else if (sta->deflink.he_cap.has_he) { mode |= RTW89_RA_MODE_HE; csi_mode = RTW89_RA_RPT_MODE_HE; ra_mask |= get_he_ra_mask(sta); @@ -1519,15 +1575,15 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl); -static const u8 rtw89_rs_idx_num[] = { +static const u8 rtw89_rs_idx_num_ax[] = { [RTW89_RS_CCK] = RTW89_RATE_CCK_NUM, [RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM, - [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM, + [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX, [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM, - [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM, + [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX, }; -static const u8 rtw89_rs_nss_num[] = { +static const u8 rtw89_rs_nss_num_ax[] = { [RTW89_RS_CCK] = 1, 
[RTW89_RS_OFDM] = 1, [RTW89_RS_MCS] = RTW89_NSS_NUM, @@ -1535,68 +1591,73 @@ static const u8 rtw89_rs_nss_num[] = { [RTW89_RS_OFFSET] = 1, }; -static const u8 _byr_of_rs[] = { - [RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck), - [RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm), - [RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs), - [RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm), - [RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset), -}; - -#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs]) -#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_num[rs] + (idx)) -#define _byr_chk(rs, nss, idx) \ - ((nss) < rtw89_rs_nss_num[rs] && (idx) < rtw89_rs_idx_num[rs]) +s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_byrate *head, + const struct rtw89_rate_desc *desc) +{ + switch (desc->rs) { + case RTW89_RS_CCK: + return &head->cck[desc->idx]; + case RTW89_RS_OFDM: + return &head->ofdm[desc->idx]; + case RTW89_RS_MCS: + return &head->mcs[desc->ofdma][desc->nss][desc->idx]; + case RTW89_RS_HEDCM: + return &head->hedcm[desc->ofdma][desc->nss][desc->idx]; + case RTW89_RS_OFFSET: + return &head->offset[desc->idx]; + default: + rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs); + return &head->trap; + } +} void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl) { const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data; const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size; + struct rtw89_txpwr_byrate *byr_head; + struct rtw89_rate_desc desc = {}; s8 *byr; u32 data; - u8 i, idx; + u8 i; for (; cfg < end; cfg++) { - byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]); + byr_head = &rtwdev->byr[cfg->band][0]; + desc.rs = cfg->rs; + desc.nss = cfg->nss; data = cfg->data; for (i = 0; i < cfg->len; i++, data >>= 8) { - idx = _byr_idx(cfg->rs, cfg->nss, (cfg->shf + i)); - byr[idx] = (s8)(data & 0xff); + desc.idx = cfg->shf + i; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); + *byr = data & 0xff; } } } EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate); -#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \ -({ \ - const struct rtw89_chip_info *__c = (rtwdev)->chip; \ - (txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \ -}) +static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; -static -s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, + return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); +} + +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, const struct rtw89_rate_desc *rate_desc) { + struct rtw89_txpwr_byrate *byr_head; s8 *byr; - u8 idx; if (rate_desc->rs == RTW89_RS_CCK) band = RTW89_BAND_2G; - if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) { - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n", - rate_desc->rs, rate_desc->nss, rate_desc->idx); - - return 0; - } - - byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]); - idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx); + byr_head = &rtwdev->byr[band][bw]; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc); - return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]); + return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr); } static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g) @@ -1688,7 +1749,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, return 0; } - lmt = 
_phy_txpwr_rf_to_mac(rtwdev, lmt); + lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); sar = rtw89_query_sar(rtwdev, freq); return min(lmt, sar); @@ -1706,9 +1767,9 @@ EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit); (ch)); \ } while (0) -static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch) +static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch) { __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch); @@ -1721,9 +1782,9 @@ static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev, ntx, RTW89_RS_MCS, ch); } -static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2); @@ -1742,9 +1803,9 @@ static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev, ntx, RTW89_RS_MCS, ch); } -static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { s8 val_0p5_n[RTW89_BF_NUM]; s8 val_0p5_p[RTW89_BF_NUM]; @@ -1783,9 +1844,9 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev, lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); } -static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { s8 val_0p5_n[RTW89_BF_NUM]; s8 val_0p5_p[RTW89_BF_NUM]; @@ -1870,10 +1931,10 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev, } static -void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - struct rtw89_txpwr_limit *lmt, - u8 ntx) +void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ax *lmt, + u8 ntx) { u8 band = chan->band_type; u8 pri_ch = chan->primary_channel; @@ -1884,25 +1945,25 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, switch (bw) { case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch); + rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch); break; case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; case RTW89_CHANNEL_WIDTH_160: - rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; } } -static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, - u8 ru, u8 ntx, u8 ch) +s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, + u8 ru, u8 ntx, u8 ch) { const struct rtw89_rfe_parms *rfe_parms = 
rtwdev->rfe_parms; const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; @@ -1945,16 +2006,16 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, return 0; } - lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru); + lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); sar = rtw89_query_sar(rtwdev, freq); return min(lmt_ru, sar); } static void -rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -1968,9 +2029,9 @@ rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev, } static void -rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -1993,9 +2054,9 @@ rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev, } static void -rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -2036,15 +2097,15 @@ rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev, } static void -rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 }; int i; - static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM); - for (i = 0; i < RTW89_RU_SEC_NUM; i++) { + static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX); + for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) { lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, @@ -2061,10 +2122,10 @@ rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev, } static -void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 ntx) +void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 ntx) { u8 band = chan->band_type; u8 ch = chan->channel; @@ -2074,27 +2135,27 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, switch (bw) { case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_160: - rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; } } -void 
rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_nss_num = rtwdev->chip->rf_path_num; static const u8 rs[] = { @@ -2103,7 +2164,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, RTW89_RS_MCS, RTW89_RS_HEDCM, }; - struct rtw89_rate_desc cur; + struct rtw89_rate_desc cur = {}; u8 band = chan->band_type; u8 ch = chan->channel; u32 addr, val; @@ -2113,23 +2174,23 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr byrate with ch=%d\n", ch); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_CCK] % 4); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_OFDM] % 4); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_MCS] % 4); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_HEDCM] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4); addr = R_AX_PWR_BY_RATE; for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) { for (i = 0; i < ARRAY_SIZE(rs); i++) { - if (cur.nss >= rtw89_rs_nss_num[rs[i]]) + if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]]) continue; cur.rs = rs[i]; - for (cur.idx = 0; cur.idx < rtw89_rs_idx_num[rs[i]]; + for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]]; cur.idx++) { v[cur.idx % 4] = rtw89_phy_read_txpwr_byrate(rtwdev, - band, + band, 0, &cur); if ((cur.idx + 1) % 4) @@ -2147,26 +2208,26 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate); -void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static +void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { struct rtw89_rate_desc desc = { .nss = RTW89_NSS_1, .rs = RTW89_RS_OFFSET, }; u8 band = chan->band_type; - s8 v[RTW89_RATE_OFFSET_NUM] = {}; + s8 v[RTW89_RATE_OFFSET_NUM_AX] = {}; u32 val; rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n"); - for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM; desc.idx++) - v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc); + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++) + v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); - BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM != 5); + BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5); val = FIELD_PREP(GENMASK(3, 0), v[0]) | FIELD_PREP(GENMASK(7, 4), v[1]) | FIELD_PREP(GENMASK(11, 8), v[2]) | @@ -2176,14 +2237,13 @@ void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, GENMASK(19, 0), val); } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset); -void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_ntx_num = rtwdev->chip->rf_path_num; - struct rtw89_txpwr_limit lmt; + struct rtw89_txpwr_limit_ax lmt; u8 ch = chan->channel; u8 bw = chan->band_width; const s8 *ptr; @@ -2193,15 +2253,15 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr limit with ch=%d bw=%d\n", 
ch, bw); - BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) != - RTW89_TXPWR_LMT_PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) != + RTW89_TXPWR_LMT_PAGE_SIZE_AX); addr = R_AX_PWR_LMT; for (i = 0; i < max_ntx_num; i++) { - rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i); + rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i); ptr = (s8 *)&lmt; - for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE; + for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX; j += 4, addr += 4, ptr += 4) { val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | FIELD_PREP(GENMASK(15, 8), ptr[1]) | @@ -2212,14 +2272,13 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit); -void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_ntx_num = rtwdev->chip->rf_path_num; - struct rtw89_txpwr_limit_ru lmt_ru; + struct rtw89_txpwr_limit_ru_ax lmt_ru; u8 ch = chan->channel; u8 bw = chan->band_width; const s8 *ptr; @@ -2229,15 +2288,15 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); - BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) != - RTW89_TXPWR_LMT_RU_PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) != + RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX); addr = R_AX_PWR_RU_LMT; for (i = 0; i < max_ntx_num; i++) { - rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i); + rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i); ptr = (s8 *)&lmt_ru; - for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE; + for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX; j += 4, addr += 4, ptr += 4) { val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | FIELD_PREP(GENMASK(15, 8), ptr[1]) | @@ -2248,7 +2307,6 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru); struct rtw89_phy_iter_ra_data { struct rtw89_dev *rtwdev; @@ -2341,6 +2399,18 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2; mcs = ra_report->txrate.mcs; break; + case RTW89_RA_RPT_MODE_EHT: + ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS; + ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1); + ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1; + if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8; + else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6; + else + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2; + mcs = ra_report->txrate.mcs; + break; } ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw); @@ -2487,6 +2557,9 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) s32 dcfo_comp_val; int sign; + if (rtwdev->chip->chip_id == RTL8922A) + return; + if (!is_linked) { rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n", is_linked); @@ -2507,16 +2580,23 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_cfo_regs *cfo = phy->cfo; - rtw89_phy_set_phy_regs(rtwdev, 
R_DCFO_OPT, B_DCFO_OPT_EN, 1); - rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8); + rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8); - if (chip->cfo_hw_comp) - rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, - B_AX_PWR_UL_CFO_MASK, 0x6); - else - rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK); + if (chip->chip_gen == RTW89_CHIP_AX) { + if (chip->cfo_hw_comp) { + rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, + B_AX_PWR_UL_CFO_MASK, 0x6); + } else { + rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); + rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, + B_AX_PWR_UL_CFO_MASK); + } + } } static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) @@ -2539,7 +2619,6 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n", cfo->crystal_cap_default); rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true); - rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); rtw89_dcfo_comp_init(rtwdev); cfo->cfo_timer_ms = 2000; cfo->cfo_trig_by_timer_en = false; @@ -2556,11 +2635,15 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev, s32 cfo_abs = abs(curr_cfo); int sign; + if (curr_cfo == 0) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); + return; + } if (!cfo->is_adjust) { if (cfo_abs > CFO_TRK_ENABLE_TH) cfo->is_adjust = true; } else { - if (cfo_abs < CFO_TRK_STOP_TH) + if (cfo_abs <= CFO_TRK_STOP_TH) cfo->is_adjust = false; } if (!cfo->is_adjust) { @@ -2752,10 +2835,6 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) new_cfo = rtw89_phy_average_cfo_calc(rtwdev); else new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); - if (new_cfo == 0) { - rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); - return; - } if (cfo->divergence_lock_en) { cfo->lock_cnt++; if (cfo->lock_cnt > CFO_PERIOD_CNT) { @@ -2898,7 +2977,7 @@ void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtwvif->sub_entity_idx); struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - if (!chip->support_ul_tb_ctrl) + if (!chip->ul_tb_waveform_ctrl) return; rtwvif->def_tri_idx = @@ -2928,6 +3007,61 @@ struct rtw89_phy_ul_tb_check_data { u8 def_tri_idx; }; +struct rtw89_phy_power_diff { + u32 q_00; + u32 q_11; + u32 q_matrix_en; + u32 ultb_1t_norm_160; + u32 ultb_2t_norm_160; + u32 com1_norm_1sts; + u32 com2_resp_1sts_path; +}; + +static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + static const struct rtw89_phy_power_diff table[2] = { + {0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3}, + {0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1}, + }; + const struct rtw89_phy_power_diff *param; + u32 reg; + + if (!rtwdev->chip->ul_tb_pwr_diff) + return; + + if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) { + rtwvif->pwr_diff_en = false; + return; + } + + rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en; + param = &table[rtwvif->pwr_diff_en]; + + rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL, + param->q_00); + rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL, + param->q_11); + rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX, + B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160, + param->ultb_1t_norm_160); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx); + 
rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160, + param->ultb_2t_norm_160); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS, + param->com1_norm_1sts); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH, + param->com2_resp_1sts_path); +} + static void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, @@ -2942,41 +3076,34 @@ void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, if (!vif->cfg.assoc) return; - if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) - ul_tb_data->high_tf_client = true; - else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) - ul_tb_data->low_tf_client = true; + if (rtwdev->chip->ul_tb_waveform_ctrl) { + if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) + ul_tb_data->high_tf_client = true; + else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) + ul_tb_data->low_tf_client = true; + + ul_tb_data->valid = true; + ul_tb_data->def_tri_idx = rtwvif->def_tri_idx; + ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en; + } - ul_tb_data->valid = true; - ul_tb_data->def_tri_idx = rtwvif->def_tri_idx; - ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en; + rtw89_phy_ofdma_power_diff(rtwdev, rtwvif); } -void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) +static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_phy_ul_tb_check_data *ul_tb_data) { - const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; - struct rtw89_vif *rtwvif; - if (!chip->support_ul_tb_ctrl) - return; - - if (rtwdev->total_sta_assoc != 1) + if (!rtwdev->chip->ul_tb_waveform_ctrl) return; - rtw89_for_each_rtwvif(rtwdev, rtwvif) - rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data); - - if (!ul_tb_data.valid) - return; - - if (ul_tb_data.dyn_tb_bedge_en) { - if (ul_tb_data.high_tf_client) { + if (ul_tb_data->dyn_tb_bedge_en) { + if (ul_tb_data->high_tf_client) { rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Turn off if_bandedge\n"); - } else if (ul_tb_data.low_tf_client) { + } else if (ul_tb_data->low_tf_client) { rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, ul_tb_info->def_if_bandedge); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, @@ -2986,28 +3113,49 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) } if (ul_tb_info->dyn_tb_tri_en) { - if (ul_tb_data.high_tf_client) { + if (ul_tb_data->high_tf_client) { rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, 0); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Turn off Tx triangle\n"); - } else if (ul_tb_data.low_tf_client) { + } else if (ul_tb_data->low_tf_client) { rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, - ul_tb_data.def_tri_idx); + ul_tb_data->def_tri_idx); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Set to default tx_shap_idx = %d\n", - ul_tb_data.def_tri_idx); + ul_tb_data->def_tri_idx); } } } +void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; + struct rtw89_vif *rtwvif; + + if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff) + return; + + if (rtwdev->total_sta_assoc != 1) + return; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_phy_ul_tb_ctrl_check(rtwdev, 
rtwvif, &ul_tb_data); + + if (!ul_tb_data.valid) + return; + + rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data); +} + static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - if (!chip->support_ul_tb_ctrl) + if (!chip->ul_tb_waveform_ctrl) return; ul_tb_info->dyn_tb_tri_en = true; @@ -4474,8 +4622,6 @@ static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev) void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) { - const struct rtw89_chip_info *chip = rtwdev->chip; - rtw89_phy_stat_init(rtwdev); rtw89_chip_bb_sethw(rtwdev); @@ -4491,7 +4637,6 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) rtw89_phy_init_rf_nctl(rtwdev); rtw89_chip_rfk_init(rtwdev); - rtw89_load_txpwr_table(rtwdev, chip->byr_table); rtw89_chip_set_txpwr_ctrl(rtwdev); rtw89_chip_power_trim(rtwdev); rtw89_chip_cfg_txrx_path(rtwdev); @@ -4500,6 +4645,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) { const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld; enum rtw89_phy_idx phy_idx = RTW89_PHY_0; u8 bss_color; @@ -4508,7 +4654,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif bss_color = vif->bss_conf.he_bss_color.color; - rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1, + rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1, phy_idx); rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT, bss_color, phy_idx); @@ -4829,9 +4975,22 @@ static const struct rtw89_physts_regs rtw89_physts_regs_ax = { .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, }; +static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = { + .comp = R_DCFO_WEIGHT, + .weighting_mask = B_DCFO_WEIGHT_MSK, + .comp_seg0 = R_DCFO_OPT, + .valid_0_mask = B_DCFO_OPT_EN, +}; + const struct rtw89_phy_gen_def rtw89_phy_gen_ax = { .cr_base = 0x10000, .ccx = &rtw89_ccx_regs_ax, .physts = &rtw89_physts_regs_ax, + .cfo = &rtw89_cfo_regs_ax, + + .set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax, + .set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax, + .set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax, + .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax, }; EXPORT_SYMBOL(rtw89_phy_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index d6dc0cbbae..5c85122e7b 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -46,6 +46,11 @@ #define RA_MASK_HE_3SS_RATES GENMASK_ULL(47, 36) #define RA_MASK_HE_4SS_RATES GENMASK_ULL(59, 48) #define RA_MASK_HE_RATES GENMASK_ULL(59, 12) +#define RA_MASK_EHT_1SS_RATES GENMASK_ULL(27, 12) +#define RA_MASK_EHT_2SS_RATES GENMASK_ULL(43, 28) +#define RA_MASK_EHT_3SS_RATES GENMASK_ULL(59, 44) +#define RA_MASK_EHT_4SS_RATES GENMASK_ULL(62, 60) +#define RA_MASK_EHT_RATES GENMASK_ULL(62, 12) #define CFO_TRK_ENABLE_TH (2 << 2) #define CFO_TRK_STOP_TH_4 (30 << 2) @@ -400,10 +405,97 @@ struct rtw89_physts_regs { u32 dis_trigger_brk_mask; }; +struct rtw89_cfo_regs { + u32 comp; + u32 weighting_mask; + u32 comp_seg0; + u32 valid_0_mask; +}; + +enum rtw89_bandwidth_section_num_ax { + RTW89_BW20_SEC_NUM_AX = 8, + RTW89_BW40_SEC_NUM_AX = 4, + RTW89_BW80_SEC_NUM_AX = 2, +}; + +enum rtw89_bandwidth_section_num_be { + RTW89_BW20_SEC_NUM_BE = 16, + RTW89_BW40_SEC_NUM_BE = 8, + 
RTW89_BW80_SEC_NUM_BE = 4, + RTW89_BW160_SEC_NUM_BE = 2, +}; + +#define RTW89_TXPWR_LMT_PAGE_SIZE_AX 40 + +struct rtw89_txpwr_limit_ax { + s8 cck_20m[RTW89_BF_NUM]; + s8 cck_40m[RTW89_BF_NUM]; + s8 ofdm[RTW89_BF_NUM]; + s8 mcs_20m[RTW89_BW20_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_40m[RTW89_BW40_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_80m[RTW89_BW80_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_160m[RTW89_BF_NUM]; + s8 mcs_40m_0p5[RTW89_BF_NUM]; + s8 mcs_40m_2p5[RTW89_BF_NUM]; +}; + +#define RTW89_TXPWR_LMT_PAGE_SIZE_BE 76 + +struct rtw89_txpwr_limit_be { + s8 cck_20m[RTW89_BF_NUM]; + s8 cck_40m[RTW89_BF_NUM]; + s8 ofdm[RTW89_BF_NUM]; + s8 mcs_20m[RTW89_BW20_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_40m[RTW89_BW40_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_80m[RTW89_BW80_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_160m[RTW89_BW160_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_320m[RTW89_BF_NUM]; + s8 mcs_40m_0p5[RTW89_BF_NUM]; + s8 mcs_40m_2p5[RTW89_BF_NUM]; + s8 mcs_40m_4p5[RTW89_BF_NUM]; + s8 mcs_40m_6p5[RTW89_BF_NUM]; +}; + +#define RTW89_RU_SEC_NUM_AX 8 + +#define RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX 24 + +struct rtw89_txpwr_limit_ru_ax { + s8 ru26[RTW89_RU_SEC_NUM_AX]; + s8 ru52[RTW89_RU_SEC_NUM_AX]; + s8 ru106[RTW89_RU_SEC_NUM_AX]; +}; + +#define RTW89_RU_SEC_NUM_BE 16 + +#define RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE 80 + +struct rtw89_txpwr_limit_ru_be { + s8 ru26[RTW89_RU_SEC_NUM_BE]; + s8 ru52[RTW89_RU_SEC_NUM_BE]; + s8 ru106[RTW89_RU_SEC_NUM_BE]; + s8 ru52_26[RTW89_RU_SEC_NUM_BE]; + s8 ru106_26[RTW89_RU_SEC_NUM_BE]; +}; + struct rtw89_phy_gen_def { u32 cr_base; const struct rtw89_ccx_regs *ccx; const struct rtw89_physts_regs *physts; + const struct rtw89_cfo_regs *cfo; + + void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_offset)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_limit)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_limit_ru)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); }; extern const struct rtw89_phy_gen_def rtw89_phy_gen_ax; @@ -613,22 +705,58 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data, enum rtw89_phy_idx phy_idx); u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, enum rtw89_phy_idx phy_idx); +s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_byrate *head, + const struct rtw89_rate_desc *desc); +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, + const struct rtw89_rate_desc *rate_desc); void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl); s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch); +s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, + u8 ru, u8 ntx, u8 ch); + +static inline void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_byrate(rtwdev, chan, phy_idx); +} + +static inline void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_offset(rtwdev, chan, phy_idx); +} + +static inline void 
rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_limit(rtwdev, chan, phy_idx); +} + +static inline void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx); +} + void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c index 778e4b0c8e..63eeeea72b 100644 --- a/drivers/net/wireless/realtek/rtw89/phy_be.c +++ b/drivers/net/wireless/realtek/rtw89/phy_be.c @@ -2,6 +2,8 @@ /* Copyright(c) 2023 Realtek Corporation */ +#include "debug.h" +#include "mac.h" #include "phy.h" #include "reg.h" @@ -69,9 +71,583 @@ static const struct rtw89_physts_regs rtw89_physts_regs_be = { .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, }; +static const struct rtw89_cfo_regs rtw89_cfo_regs_be = { + .comp = R_DCFO_WEIGHT_V1, + .weighting_mask = B_DCFO_WEIGHT_MSK_V1, + .comp_seg0 = R_DCFO_OPT_V1, + .valid_0_mask = B_DCFO_OPT_EN_V1, +}; + +struct rtw89_byr_spec_ent_be { + struct rtw89_rate_desc init; + u8 num_of_idx; + bool no_over_bw40; + bool no_multi_nss; +}; + +static const struct rtw89_byr_spec_ent_be rtw89_byr_spec_be[] = { + { + .init = { .rs = RTW89_RS_CCK }, + .num_of_idx = RTW89_RATE_CCK_NUM, + .no_over_bw40 = true, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_OFDM }, + .num_of_idx = RTW89_RATE_OFDM_NUM, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = 2, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_OFDMA }, + .num_of_idx = 2, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = 14, + }, + { + .init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = RTW89_RATE_HEDCM_NUM, + }, + { + .init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_OFDMA }, + .num_of_idx = 14, + }, + { + .init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_OFDMA }, + .num_of_idx = RTW89_RATE_HEDCM_NUM, + }, +}; + +static +void __phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, u8 band, u8 bw, + u8 nss, u32 *addr, enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_byr_spec_ent_be *ent; + struct rtw89_rate_desc desc; + int pos = 0; + int i, j; + u32 val; + s8 v[4]; + + for (i = 0; i < ARRAY_SIZE(rtw89_byr_spec_be); i++) { + ent = &rtw89_byr_spec_be[i]; + + if (bw > RTW89_CHANNEL_WIDTH_40 && ent->no_over_bw40) + continue; + if (nss > RTW89_NSS_1 && ent->no_multi_nss) + continue; + + desc = ent->init; + desc.nss = nss; + for (j = 0; j < ent->num_of_idx; j++, desc.idx++) { + v[pos] = rtw89_phy_read_txpwr_byrate(rtwdev, band, bw, + &desc); + pos = (pos + 1) % 4; + if (pos) + continue; + + val = u32_encode_bits(v[0], GENMASK(7, 0)) | + u32_encode_bits(v[1], GENMASK(15, 8)) | + u32_encode_bits(v[2], GENMASK(23, 16)) | + u32_encode_bits(v[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, *addr, val); + *addr += 4; + } + } +} + +static void rtw89_phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, + const 
struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u32 addr = R_BE_PWR_BY_RATE; + u8 band = chan->band_type; + u8 bw, nss; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr byrate on band %d\n", band); + + for (bw = 0; bw <= RTW89_CHANNEL_WIDTH_320; bw++) + for (nss = 0; nss <= RTW89_NSS_2; nss++) + __phy_set_txpwr_byrate_be(rtwdev, band, bw, nss, + &addr, phy_idx); +} + +static void rtw89_phy_set_txpwr_offset_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_rate_desc desc = { + .nss = RTW89_NSS_1, + .rs = RTW89_RS_OFFSET, + }; + u8 band = chan->band_type; + s8 v[RTW89_RATE_OFFSET_NUM_BE] = {}; + u32 val; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr offset on band %d\n", band); + + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_BE; desc.idx++) + v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); + + val = u32_encode_bits(v[RTW89_RATE_OFFSET_CCK], GENMASK(3, 0)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_OFDM], GENMASK(7, 4)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_HT], GENMASK(11, 8)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_VHT], GENMASK(15, 12)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_HE], GENMASK(19, 16)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_EHT], GENMASK(23, 20)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_HE], GENMASK(27, 24)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_EHT], GENMASK(31, 28)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_BE_PWR_RATE_OFST_CTRL, val); +} + +static void +fill_limit_nonbf_bf(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM], + u8 band, u8 bw, u8 ntx, u8 rs, u8 ch) +{ + int bf; + + for (bf = 0; bf < RTW89_BF_NUM; bf++) + (*ptr)[bf] = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, ntx, + rs, bf, ch); +} + +static void +fill_limit_nonbf_bf_min(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM], + u8 band, u8 bw, u8 ntx, u8 rs, u8 ch1, u8 ch2) +{ + s8 v1[RTW89_BF_NUM]; + s8 v2[RTW89_BF_NUM]; + int bf; + + fill_limit_nonbf_bf(rtwdev, &v1, band, bw, ntx, rs, ch1); + fill_limit_nonbf_bf(rtwdev, &v2, band, bw, ntx, rs, ch2); + + for (bf = 0; bf < RTW89_BF_NUM; bf++) + (*ptr)[bf] = min(v1[bf], v2[bf]); +} + +static void phy_fill_limit_20m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch); +} + +static void phy_fill_limit_40m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch); +} + +static void phy_fill_limit_80m_be(struct rtw89_dev 
*rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 4, ch + 4); +} + +static void phy_fill_limit_160m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 12, ch - 4); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 4, ch + 12); +} + +static void phy_fill_limit_320m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 30); + 
fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 26); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 22); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 18); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[8], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[9], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[10], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[11], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[12], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 18); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[13], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 22); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[14], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 26); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[15], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 30); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 28); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 20); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[4], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[5], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[6], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 20); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[7], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 28); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 24); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[2], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[3], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 24); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch - 16); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[1], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch + 16); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_320m, band, + RTW89_CHANNEL_WIDTH_320, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 28, ch - 20); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 12, ch - 4); + 
fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_4p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 4, ch + 12); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_6p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 20, ch + 28); +} + +static void rtw89_phy_fill_limit_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_be *lmt, + u8 ntx) +{ + u8 band = chan->band_type; + u8 pri_ch = chan->primary_channel; + u8 ch = chan->channel; + u8 bw = chan->band_width; + + memset(lmt, 0, sizeof(*lmt)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + phy_fill_limit_20m_be(rtwdev, lmt, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + phy_fill_limit_40m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_80: + phy_fill_limit_80m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_160: + phy_fill_limit_160m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_320: + phy_fill_limit_320m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + } +} + +static void rtw89_phy_set_txpwr_limit_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_txpwr_limit_be lmt; + const s8 *ptr; + u32 addr, val; + u8 i, j; + + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_be) != + RTW89_TXPWR_LMT_PAGE_SIZE_BE); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit on band %d bw %d\n", + chan->band_type, chan->band_width); + + addr = R_BE_PWR_LMT; + for (i = 0; i <= RTW89_NSS_2; i++) { + rtw89_phy_fill_limit_be(rtwdev, chan, &lmt, i); + + ptr = (s8 *)&lmt; + for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_BE; + j += 4, addr += 4, ptr += 4) { + val = u32_encode_bits(ptr[0], GENMASK(7, 0)) | + u32_encode_bits(ptr[1], GENMASK(15, 8)) | + u32_encode_bits(ptr[2], GENMASK(23, 16)) | + u32_encode_bits(ptr[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +} + +static void fill_limit_ru_each(struct rtw89_dev *rtwdev, u8 index, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + lmt_ru->ru26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, ch); + lmt_ru->ru52[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52, ntx, ch); + lmt_ru->ru106[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106, ntx, ch); + lmt_ru->ru52_26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52_26, ntx, ch); + lmt_ru->ru106_26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106_26, ntx, ch); +} + +static void phy_fill_limit_ru_20m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch); +} + +static void phy_fill_limit_ru_40m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch + 2); +} + +static void phy_fill_limit_ru_80m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 6); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch + 6); +} + +static void phy_fill_limit_ru_160m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 
ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 14); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 10); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch - 6); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 4, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 5, lmt_ru, band, ntx, ch + 6); + fill_limit_ru_each(rtwdev, 6, lmt_ru, band, ntx, ch + 10); + fill_limit_ru_each(rtwdev, 7, lmt_ru, band, ntx, ch + 14); +} + +static void phy_fill_limit_ru_320m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 30); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 26); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch - 22); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch - 18); + fill_limit_ru_each(rtwdev, 4, lmt_ru, band, ntx, ch - 14); + fill_limit_ru_each(rtwdev, 5, lmt_ru, band, ntx, ch - 10); + fill_limit_ru_each(rtwdev, 6, lmt_ru, band, ntx, ch - 6); + fill_limit_ru_each(rtwdev, 7, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 8, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 9, lmt_ru, band, ntx, ch + 6); + fill_limit_ru_each(rtwdev, 10, lmt_ru, band, ntx, ch + 10); + fill_limit_ru_each(rtwdev, 11, lmt_ru, band, ntx, ch + 14); + fill_limit_ru_each(rtwdev, 12, lmt_ru, band, ntx, ch + 18); + fill_limit_ru_each(rtwdev, 13, lmt_ru, band, ntx, ch + 22); + fill_limit_ru_each(rtwdev, 14, lmt_ru, band, ntx, ch + 26); + fill_limit_ru_each(rtwdev, 15, lmt_ru, band, ntx, ch + 30); +} + +static void rtw89_phy_fill_limit_ru_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 ntx) +{ + u8 band = chan->band_type; + u8 ch = chan->channel; + u8 bw = chan->band_width; + + memset(lmt_ru, 0, sizeof(*lmt_ru)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + phy_fill_limit_ru_20m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + phy_fill_limit_ru_40m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_80: + phy_fill_limit_ru_80m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_160: + phy_fill_limit_ru_160m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_320: + phy_fill_limit_ru_320m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + } +} + +static void rtw89_phy_set_txpwr_limit_ru_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_txpwr_limit_ru_be lmt_ru; + const s8 *ptr; + u32 addr, val; + u8 i, j; + + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_be) != + RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit ru on band %d bw %d\n", + chan->band_type, chan->band_width); + + addr = R_BE_PWR_RU_LMT; + for (i = 0; i <= RTW89_NSS_2; i++) { + rtw89_phy_fill_limit_ru_be(rtwdev, chan, &lmt_ru, i); + + ptr = (s8 *)&lmt_ru; + for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE; + j += 4, addr += 4, ptr += 4) { + val = u32_encode_bits(ptr[0], GENMASK(7, 0)) | + u32_encode_bits(ptr[1], GENMASK(15, 8)) | + u32_encode_bits(ptr[2], GENMASK(23, 16)) | + u32_encode_bits(ptr[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +} + const struct rtw89_phy_gen_def rtw89_phy_gen_be = { .cr_base = 0x20000, .ccx = &rtw89_ccx_regs_be, .physts = &rtw89_physts_regs_be, + .cfo = &rtw89_cfo_regs_be, + + .set_txpwr_byrate = 
rtw89_phy_set_txpwr_byrate_be, + .set_txpwr_offset = rtw89_phy_set_txpwr_offset_be, + .set_txpwr_limit = rtw89_phy_set_txpwr_limit_be, + .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be, }; EXPORT_SYMBOL(rtw89_phy_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index c0aac4d367..672010b9e0 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2375,6 +2375,14 @@ #define R_AX_TSFTR_HIGH_P4 0xC53C #define B_AX_TSFTR_HIGH_MASK GENMASK(31, 0) +#define R_AX_BCN_DROP_ALL0 0xC560 +#define R_AX_BCN_DROP_ALL0_C1 0xE560 +#define B_AX_BCN_DROP_ALL_P4 BIT(4) +#define B_AX_BCN_DROP_ALL_P3 BIT(3) +#define B_AX_BCN_DROP_ALL_P2 BIT(2) +#define B_AX_BCN_DROP_ALL_P1 BIT(1) +#define B_AX_BCN_DROP_ALL_P0 BIT(0) + #define R_AX_MBSSID_CTRL 0xC568 #define R_AX_MBSSID_CTRL_C1 0xE568 #define B_AX_P0MB_ALL_MASK GENMASK(23, 1) @@ -2554,11 +2562,20 @@ #define R_AX_PTCL_DBG_INFO 0xC6F0 #define R_AX_PTCL_DBG_INFO_C1 0xE6F0 +#define B_AX_PTCL_DBG_INFO_MASK_BY_PORT(port) \ +({\ + typeof(port) _port = (port); \ + GENMASK((_port) * 2 + 1, (_port) * 2); \ +}) + #define B_AX_PTCL_DBG_INFO_MASK GENMASK(31, 0) #define R_AX_PTCL_DBG 0xC6F4 #define R_AX_PTCL_DBG_C1 0xE6F4 #define B_AX_PTCL_DBG_EN BIT(8) #define B_AX_PTCL_DBG_SEL_MASK GENMASK(7, 0) +#define AX_PTCL_DBG_BCNQ_NUM0 8 +#define AX_PTCL_DBG_BCNQ_NUM1 9 + #define R_AX_DLE_CTRL 0xC800 #define R_AX_DLE_CTRL_C1 0xE800 @@ -3360,9 +3377,11 @@ #define R_AX_PWR_UL_TB_1T 0xD28C #define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0) #define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0) +#define B_AX_PWR_UL_TB_1T_NORM_BW160 GENMASK(31, 24) #define R_AX_PWR_UL_TB_2T 0xD290 #define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0) #define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0) +#define B_AX_PWR_UL_TB_2T_NORM_BW160 GENMASK(31, 24) #define R_AX_PWR_BY_RATE_TABLE0 0xD2C0 #define R_AX_PWR_BY_RATE_TABLE6 0xD2D8 #define R_AX_PWR_BY_RATE_TABLE10 0xD2E8 @@ -3390,11 +3409,13 @@ #define AX_PATH_COM0_PATHB 0x11111900 #define AX_PATH_COM0_PATHAB 0x19999980 #define R_AX_PATH_COM1 0xD804 +#define B_AX_PATH_COM1_NORM_1STS GENMASK(31, 28) #define AX_PATH_COM1_DFVAL 0x00000000 #define AX_PATH_COM1_PATHA 0x13111111 #define AX_PATH_COM1_PATHB 0x23222222 #define AX_PATH_COM1_PATHAB 0x33333333 #define R_AX_PATH_COM2 0xD808 +#define B_AX_PATH_COM2_RESP_1STS_PATH GENMASK(7, 4) #define AX_PATH_COM2_DFVAL 0x00000000 #define AX_PATH_COM2_PATHA 0x01209313 #define AX_PATH_COM2_PATHB 0x01209323 @@ -3581,8 +3602,8 @@ #define R_AX_MACID_ANT_TABLE 0xDC00 #define R_AX_MACID_ANT_TABLE_LAST 0xDDFC -#define CMAC1_START_ADDR 0xE000 -#define CMAC1_END_ADDR 0xFFFF +#define CMAC1_START_ADDR_AX 0xE000 +#define CMAC1_END_ADDR_AX 0xFFFF #define R_AX_CMAC_REG_END 0xFFFF #define R_AX_LTE_SW_CFG_1 0x0038 @@ -3625,8 +3646,369 @@ #define B_AX_GNT_BT_TX_SW_VAL BIT(1) #define B_AX_GNT_BT_TX_SW_CTRL BIT(0) +#define R_BE_SYS_CLK_CTRL 0x0008 +#define B_BE_CPU_CLK_EN BIT(14) +#define B_BE_SYMR_BE_CLK_EN BIT(13) +#define B_BE_MAC_CLK_EN BIT(11) +#define B_BE_EXT_32K_EN BIT(8) +#define B_BE_WL_CLK_TEST BIT(7) +#define B_BE_LOADER_CLK_EN BIT(5) +#define B_BE_ANA_CLK_DIVISION_2 BIT(1) +#define B_BE_CNTD16V_EN BIT(0) + +#define R_BE_PLATFORM_ENABLE 0x0088 +#define B_BE_HOLD_AFTER_RESET BIT(11) +#define B_BE_SYM_WLPLT_MEM_MUX_EN BIT(10) +#define B_BE_WCPU_WARM_EN BIT(9) +#define B_BE_SPIC_EN BIT(8) +#define B_BE_UART_EN BIT(7) +#define B_BE_IDDMA_EN BIT(6) +#define B_BE_IPSEC_EN BIT(5) +#define B_BE_HIOE_EN BIT(4) +#define B_BE_APB_WRAP_EN BIT(2) +#define 
B_BE_WCPU_EN BIT(1) +#define B_BE_PLATFORM_EN BIT(0) + +#define R_BE_HALT_H2C_CTRL 0x0160 +#define B_BE_HALT_H2C_TRIGGER BIT(0) + +#define R_BE_HALT_C2H_CTRL 0x0164 +#define B_BE_HALT_C2H_TRIGGER BIT(0) + +#define R_BE_HALT_H2C 0x0168 +#define B_BE_HALT_H2C_MASK GENMASK(31, 0) + +#define R_BE_HALT_C2H 0x016C +#define B_BE_HALT_C2H_ERROR_SENARIO_MASK GENMASK(31, 28) +#define B_BE_ERROR_CODE_MASK GENMASK(15, 0) + +#define R_BE_SYS_CFG5 0x0170 +#define B_BE_WDT_DATACPU_WAKE_PCIE_EN BIT(12) +#define B_BE_WDT_DATACPU_WAKE_USB_EN BIT(11) +#define B_BE_WDT_WAKE_PCIE_EN BIT(10) +#define B_BE_WDT_WAKE_USB_EN BIT(9) +#define B_BE_SYM_DIS_HC_ACCESS_MAC BIT(8) +#define B_BE_LPS_STATUS BIT(3) +#define B_BE_HCI_TXDMA_BUSY BIT(2) + +#define R_BE_SECURE_BOOT_MALLOC_INFO 0x0184 + +#define R_BE_WCPU_FW_CTRL 0x01E0 +#define B_BE_RUN_ENV_MASK GENMASK(31, 30) +#define B_BE_WCPU_FWDL_STATUS_MASK GENMASK(29, 26) +#define B_BE_WDT_PLT_RST_EN BIT(17) +#define B_BE_FW_SEC_AUTH_DONE BIT(14) +#define B_BE_FW_CPU_UTIL_STS_EN BIT(13) +#define B_BE_BBMCU1_FWDL_EN BIT(12) +#define B_BE_BBMCU0_FWDL_EN BIT(11) +#define B_BE_DATACPU_FWDL_EN BIT(10) +#define B_BE_WLANCPU_FWDL_EN BIT(9) +#define B_BE_WCPU_ROM_CUT_GET BIT(8) +#define B_BE_WCPU_ROM_CUT_VAL_MASK GENMASK(7, 4) +#define B_BE_FW_BOOT_MODE_MASK GENMASK(3, 2) +#define B_BE_H2C_PATH_RDY BIT(1) +#define B_BE_DLFW_PATH_RDY BIT(0) + +#define R_BE_BOOT_REASON 0x01E6 +#define B_BE_BOOT_REASON_MASK GENMASK(2, 0) + +#define R_BE_LDM 0x01E8 +#define B_BE_EN_32K BIT(31) +#define B_BE_LDM_MASK GENMASK(30, 0) + +#define R_BE_UDM0 0x01F0 +#define B_BE_UDM0_SEND2RA_CNT_MASK GENMASK(31, 28) +#define B_BE_UDM0_TX_RPT_CNT_MASK GENMASK(27, 24) +#define B_BE_UDM0_FS_CODE_MASK GENMASK(23, 8) +#define B_BE_NULL_POINTER_INDC BIT(7) +#define B_BE_ROM_ASSERT_INDC BIT(6) +#define B_BE_RAM_ASSERT_INDC BIT(5) +#define B_BE_FW_IMAGE_TYPE BIT(4) +#define B_BE_UDM0_TRAP_LOOP_CTRL BIT(2) +#define B_BE_UDM0_SEND_HALTC2H_CTRL BIT(1) +#define B_BE_UDM0_DBG_MODE_CTRL BIT(0) + +#define R_BE_UDM1 0x01F4 +#define B_BE_UDM1_ERROR_ADDR_MASK GENMASK(31, 16) +#define B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK GENMASK(15, 12) +#define B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK GENMASK(11, 8) +#define B_BE_UDM1_WCPU_C2H_ENQ_CNT_MASK GENMASK(7, 4) +#define B_BE_UDM1_WCPU_H2C_DEQ_CNT_MASK GENMASK(3, 0) + +#define R_BE_UDM2 0x01F8 +#define B_BE_UDM2_EPC_RA_MASK GENMASK(31, 0) + +#define R_BE_DCPU_PLATFORM_ENABLE 0x0888 +#define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10) +#define B_BE_DCPU_WARM_EN BIT(9) +#define B_BE_DCPU_UART_EN BIT(7) +#define B_BE_DCPU_IDDMA_EN BIT(6) +#define B_BE_DCPU_APB_WRAP_EN BIT(2) +#define B_BE_DCPU_EN BIT(1) +#define B_BE_DCPU_PLATFORM_EN BIT(0) + #define R_BE_FILTER_MODEL_ADDR 0x0C04 +#define R_BE_PLE_DBG_FUN_INTF_CTL 0x9110 +#define B_BE_PLE_DFI_ACTIVE BIT(31) +#define B_BE_PLE_DFI_TRGSEL_MASK GENMASK(19, 16) +#define B_BE_PLE_DFI_ADDR_MASK GENMASK(15, 0) + +#define R_BE_PLE_DBG_FUN_INTF_DATA 0x9114 +#define B_BE_PLE_DFI_DATA_MASK GENMASK(31, 0) + +#define R_BE_CMAC_FUNC_EN 0x10000 +#define R_BE_CMAC_FUNC_EN_C1 0x14000 +#define B_BE_CMAC_CRPRT BIT(31) +#define B_BE_CMAC_EN BIT(30) +#define B_BE_CMAC_TXEN BIT(29) +#define B_BE_CMAC_RXEN BIT(28) +#define B_BE_FORCE_RESP_PKTCTL_GCKEN BIT(26) +#define B_BE_FORCE_SIGB_REG_GCKEN BIT(25) +#define B_BE_FORCE_POWER_REG_GCKEN BIT(23) +#define B_BE_FORCE_RMAC_REG_GCKEN BIT(22) +#define B_BE_FORCE_TRXPTCL_REG_GCKEN BIT(21) +#define B_BE_FORCE_TMAC_REG_GCKEN BIT(20) +#define B_BE_FORCE_CMAC_DMA_REG_GCKEN BIT(19) +#define B_BE_FORCE_PTCL_REG_GCKEN BIT(18) +#define 
B_BE_FORCE_SCHEDULER_RREG_GCKEN BIT(17) +#define B_BE_FORCE_CMAC_COMMON_REG_GCKEN BIT(16) +#define B_BE_FORCE_CMACREG_GCKEN BIT(15) +#define B_BE_TXTIME_EN BIT(8) +#define B_BE_RESP_PKTCTL_EN BIT(7) +#define B_BE_SIGB_EN BIT(6) +#define B_BE_PHYINTF_EN BIT(5) +#define B_BE_CMAC_DMA_EN BIT(4) +#define B_BE_PTCLTOP_EN BIT(3) +#define B_BE_SCHEDULER_EN BIT(2) +#define B_BE_TMAC_EN BIT(1) +#define B_BE_RMAC_EN BIT(0) +#define B_BE_CMAC_FUNC_EN_SET (B_BE_CMAC_EN | B_BE_CMAC_TXEN | B_BE_CMAC_RXEN | \ + B_BE_PHYINTF_EN | B_BE_CMAC_DMA_EN | B_BE_PTCLTOP_EN | \ + B_BE_SCHEDULER_EN | B_BE_TMAC_EN | B_BE_RMAC_EN | \ + B_BE_CMAC_CRPRT | B_BE_TXTIME_EN | B_BE_RESP_PKTCTL_EN | \ + B_BE_SIGB_EN) + +#define R_BE_PORT_0_TSF_SYNC 0x102A0 +#define R_BE_PORT_0_TSF_SYNC_C1 0x142A0 +#define B_BE_P0_SYNC_NOW_P BIT(30) +#define B_BE_P0_SYNC_ONCE_P BIT(29) +#define B_BE_P0_AUTO_SYNC BIT(28) +#define B_BE_P0_SYNC_PORT_SRC_SEL_MASK GENMASK(26, 24) +#define B_BE_P0_TSFTR_SYNC_OFFSET_MASK GENMASK(18, 0) + +#define R_BE_MUEDCA_BE_PARAM_0 0x10350 +#define R_BE_MUEDCA_BK_PARAM_0 0x10354 +#define R_BE_MUEDCA_VI_PARAM_0 0x10358 +#define R_BE_MUEDCA_VO_PARAM_0 0x1035C + +#define R_BE_MUEDCA_EN 0x10370 +#define R_BE_MUEDCA_EN_C1 0x14370 +#define B_BE_MUEDCA_WMM_SEL BIT(8) +#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5) +#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4) +#define B_BE_MUEDCA_EN_0 BIT(0) + +#define R_BE_PORT_CFG_P0 0x10400 +#define R_BE_PORT_CFG_P0_C1 0x14400 +#define B_BE_BCN_ERLY_SORT_EN_P0 BIT(18) +#define B_BE_PROHIB_END_CAL_EN_P0 BIT(17) +#define B_BE_BRK_SETUP_P0 BIT(16) +#define B_BE_TBTT_UPD_SHIFT_SEL_P0 BIT(15) +#define B_BE_BCN_DROP_ALLOW_P0 BIT(14) +#define B_BE_TBTT_PROHIB_EN_P0 BIT(13) +#define B_BE_BCNTX_EN_P0 BIT(12) +#define B_BE_NET_TYPE_P0_MASK GENMASK(11, 10) +#define B_BE_BCN_FORCETX_EN_P0 BIT(9) +#define B_BE_TXBCN_BTCCA_EN_P0 BIT(8) +#define B_BE_BCNERR_CNT_EN_P0 BIT(7) +#define B_BE_BCN_AGRES_P0 BIT(6) +#define B_BE_TSFTR_RST_P0 BIT(5) +#define B_BE_RX_BSSID_FIT_EN_P0 BIT(4) +#define B_BE_TSF_UDT_EN_P0 BIT(3) +#define B_BE_PORT_FUNC_EN_P0 BIT(2) +#define B_BE_TXBCN_RPT_EN_P0 BIT(1) +#define B_BE_RXBCN_RPT_EN_P0 BIT(0) + +#define R_BE_TBTT_PROHIB_P0 0x10404 +#define R_BE_TBTT_PROHIB_P0_C1 0x14404 +#define B_BE_TBTT_HOLD_P0_MASK GENMASK(27, 16) +#define B_BE_TBTT_SETUP_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_AREA_P0 0x10408 +#define R_BE_BCN_AREA_P0_C1 0x14408 +#define B_BE_BCN_MSK_AREA_P0_MSK 0xfff +#define B_BE_BCN_CTN_AREA_P0_MASK GENMASK(11, 0) + +#define R_BE_BCNERLYINT_CFG_P0 0x1040C +#define R_BE_BCNERLYINT_CFG_P0_C1 0x1440C +#define B_BE_BCNERLY_P0_MASK GENMASK(11, 0) + +#define R_BE_TBTTERLYINT_CFG_P0 0x1040E +#define R_BE_TBTTERLYINT_CFG_P0_C1 0x1440E +#define B_BE_TBTTERLY_P0_MASK GENMASK(11, 0) + +#define R_BE_TBTT_AGG_P0 0x10412 +#define R_BE_TBTT_AGG_P0_C1 0x14412 +#define B_BE_TBTT_AGG_NUM_P0_MASK GENMASK(15, 8) + +#define R_BE_BCN_SPACE_CFG_P0 0x10414 +#define R_BE_BCN_SPACE_CFG_P0_C1 0x14414 +#define B_BE_SUB_BCN_SPACE_P0_MASK GENMASK(23, 16) +#define B_BE_BCN_SPACE_P0_MASK GENMASK(15, 0) + +#define R_BE_BCN_FORCETX_P0 0x10418 +#define R_BE_BCN_FORCETX_P0_C1 0x14418 +#define B_BE_FORCE_BCN_NUM_P0_MASK GENMASK(15, 8) +#define B_BE_BCN_MAX_ERR_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_ERR_CNT_P0 0x10420 +#define R_BE_BCN_ERR_CNT_P0_C1 0x14420 +#define B_BE_BCN_ERR_CNT_SUM_P0_MASK GENMASK(31, 24) +#define B_BE_BCN_ERR_CNT_NAV_P0_MASK GENMASK(23, 16) +#define B_BE_BCN_ERR_CNT_EDCCA_P0_MASK GENMASK(15, 8) +#define B_BE_BCN_ERR_CNT_CCA_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_ERR_FLAG_P0 
0x10424 +#define R_BE_BCN_ERR_FLAG_P0_C1 0x14424 +#define B_BE_BCN_ERR_FLAG_SRCHEND_P0 BIT(3) +#define B_BE_BCN_ERR_FLAG_INVALID_P0 BIT(2) +#define B_BE_BCN_ERR_FLAG_CMP_P0 BIT(1) +#define B_BE_BCN_ERR_FLAG_LOCK_P0 BIT(0) + +#define R_BE_DTIM_CTRL_P0 0x10426 +#define R_BE_DTIM_CTRL_P0_C1 0x14426 +#define B_BE_DTIM_NUM_P0_MASK GENMASK(15, 8) +#define B_BE_DTIM_CURRCNT_P0_MASK GENMASK(7, 0) + +#define R_BE_TBTT_SHIFT_P0 0x10428 +#define R_BE_TBTT_SHIFT_P0_C1 0x14428 +#define B_BE_TBTT_SHIFT_OFST_P0_SH 0 +#define B_BE_TBTT_SHIFT_OFST_P0_MSK 0xfff + +#define R_BE_BCN_CNT_TMR_P0 0x10434 +#define R_BE_BCN_CNT_TMR_P0_C1 0x14434 +#define B_BE_BCN_CNT_TMR_P0_MASK GENMASK(31, 0) + +#define R_BE_TSFTR_LOW_P0 0x10438 +#define R_BE_TSFTR_LOW_P0_C1 0x14438 +#define B_BE_TSFTR_LOW_P0_MASK GENMASK(31, 0) + +#define R_BE_TSFTR_HIGH_P0 0x1043C +#define R_BE_TSFTR_HIGH_P0_C1 0x1443C +#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0) + +#define R_BE_MBSSID_CTRL 0x10568 +#define R_BE_MBSSID_CTRL_C1 0x14568 +#define B_BE_MBSSID_MODE_SEL BIT(20) +#define B_BE_P0MB_NUM_MASK GENMASK(19, 16) +#define B_BE_P0MB15_EN BIT(15) +#define B_BE_P0MB14_EN BIT(14) +#define B_BE_P0MB13_EN BIT(13) +#define B_BE_P0MB12_EN BIT(12) +#define B_BE_P0MB11_EN BIT(11) +#define B_BE_P0MB10_EN BIT(10) +#define B_BE_P0MB9_EN BIT(9) +#define B_BE_P0MB8_EN BIT(8) +#define B_BE_P0MB7_EN BIT(7) +#define B_BE_P0MB6_EN BIT(6) +#define B_BE_P0MB5_EN BIT(5) +#define B_BE_P0MB4_EN BIT(4) +#define B_BE_P0MB3_EN BIT(3) +#define B_BE_P0MB2_EN BIT(2) +#define B_BE_P0MB1_EN BIT(1) + +#define R_BE_P0MB_HGQ_WINDOW_CFG_0 0x10590 +#define R_BE_P0MB_HGQ_WINDOW_CFG_0_C1 0x14590 +#define R_BE_PORT_HGQ_WINDOW_CFG 0x105A0 +#define R_BE_PORT_HGQ_WINDOW_CFG_C1 0x145A0 + +#define R_BE_AGG_LEN_HT_0 0x10814 +#define R_BE_AGG_LEN_HT_0_C1 0x14814 +#define B_BE_AMPDU_MAX_LEN_HT_MASK GENMASK(31, 16) +#define B_BE_RTS_TXTIME_TH_MASK GENMASK(15, 8) +#define B_BE_RTS_LEN_TH_MASK GENMASK(7, 0) + +#define R_BE_MBSSID_DROP_0 0x1083C +#define R_BE_MBSSID_DROP_0_C1 0x1483C +#define B_BE_GI_LTF_FB_SEL BIT(30) +#define B_BE_RATE_SEL_MASK GENMASK(29, 24) +#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16) +#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0) + +#define R_BE_PTCL_BSS_COLOR_0 0x108A0 +#define R_BE_PTCL_BSS_COLOR_0_C1 0x148A0 +#define B_BE_BSS_COLOB_BE_PORT_3_MASK GENMASK(29, 24) +#define B_BE_BSS_COLOB_BE_PORT_2_MASK GENMASK(21, 16) +#define B_BE_BSS_COLOB_BE_PORT_1_MASK GENMASK(13, 8) +#define B_BE_BSS_COLOB_BE_PORT_0_MASK GENMASK(5, 0) + +#define R_BE_PTCL_BSS_COLOR_1 0x108A4 +#define R_BE_PTCL_BSS_COLOR_1_C1 0x148A4 +#define B_BE_BSS_COLOB_BE_PORT_4_MASK GENMASK(5, 0) + +#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL 0x10E08 +#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL_C1 0x14E08 +#define B_BE_TSFT_OFS_MASK GENMASK(31, 16) +#define B_BE_STMP_THSD_MASK GENMASK(15, 8) +#define B_BE_UPD_HGQMD BIT(1) +#define B_BE_UPD_TIMIE BIT(0) + +#define R_BE_BFMEE_RESP_OPTION 0x11180 +#define R_BE_BFMEE_RESP_OPTION_C1 0x15180 +#define B_BE_BFMEE_CSI_SEC_TYPE_SH 20 +#define B_BE_BFMEE_CSI_SEC_TYPE_MSK 0xf +#define B_BE_BFMEE_BFRPT_SEG_SIZE_SH 16 +#define B_BE_BFMEE_BFRPT_SEG_SIZE_MSK 0x3 +#define B_BE_BFMEE_MIMO_EN_SEL BIT(8) +#define B_BE_BFMEE_MU_BFEE_DIS BIT(7) +#define B_BE_BFMEE_CHECK_RPTPOLL_MACID_DIS BIT(6) +#define B_BE_BFMEE_NOCHK_BFPOLL_BMP BIT(5) +#define B_BE_BFMEE_VHTBFRPT_CHK BIT(4) +#define B_BE_BFMEE_EHT_NDPA_EN BIT(3) +#define B_BE_BFMEE_HE_NDPA_EN BIT(2) +#define B_BE_BFMEE_VHT_NDPA_EN BIT(1) +#define B_BE_BFMEE_HT_NDPA_EN BIT(0) + +#define R_BE_TRXPTCL_RESP_CSI_CTRL_0 0x11188 
+#define R_BE_TRXPTCL_RESP_CSI_CTRL_0_C1 0x15188 +#define B_BE_BFMEE_CSISEQ_SEL BIT(29) +#define B_BE_BFMEE_BFPARAM_SEL BIT(28) +#define B_BE_BFMEE_OFDM_LEN_TH_MASK GENMASK(27, 24) +#define B_BE_BFMEE_BF_PORT_SEL BIT(23) +#define B_BE_BFMEE_USE_NSTS BIT(22) +#define B_BE_BFMEE_CSI_RATE_FB_EN BIT(21) +#define B_BE_BFMEE_CSI_GID_SEL BIT(20) +#define B_BE_BFMEE_CSI_RSC_MASK GENMASK(19, 18) +#define B_BE_BFMEE_CSI_FORCE_RETE_EN BIT(17) +#define B_BE_BFMEE_CSI_USE_NDPARATE BIT(16) +#define B_BE_BFMEE_CSI_WITHHTC_EN BIT(15) +#define B_BE_BFMEE_CSIINFO0_BF_EN BIT(14) +#define B_BE_BFMEE_CSIINFO0_STBC_EN BIT(13) +#define B_BE_BFMEE_CSIINFO0_LDPC_EN BIT(12) +#define B_BE_BFMEE_CSIINFO0_CS_MASK GENMASK(11, 10) +#define B_BE_BFMEE_CSIINFO0_CB_MASK GENMASK(9, 8) +#define B_BE_BFMEE_CSIINFO0_NG_MASK GENMASK(7, 6) +#define B_BE_BFMEE_CSIINFO0_NR_MASK GENMASK(5, 3) +#define B_BE_BFMEE_CSIINFO0_NC_MASK GENMASK(2, 0) +#define CSI_RX_BW_CFG 0x1 +#define R_BE_TRXPTCL_RESP_CSI_CTRL_1 0x11194 +#define R_BE_TRXPTCL_RESP_CSI_CTRL_1_C1 0x15194 +#define B_BE_BFMEE_BE_CSI_RRSC_BITMAP_MASK GENMASK(31, 24) +#define CSI_RRSC_BITMAP_CFG 0x2A + +#define R_BE_TRXPTCL_RESP_CSI_RRSC 0x1118C +#define R_BE_TRXPTCL_RESP_CSI_RRSC_C1 0x1518C +#define CSI_RRSC_BMAP_BE 0x2A2AFF + +#define R_BE_TRXPTCL_RESP_CSI_RATE 0x11190 +#define R_BE_TRXPTCL_RESP_CSI_RATE_C1 0x15190 +#define B_BE_BFMEE_EHT_CSI_RATE_MASK GENMASK(31, 24) +#define B_BE_BFMEE_HE_CSI_RATE_MASK GENMASK(23, 16) +#define B_BE_BFMEE_VHT_CSI_RATE_MASK GENMASK(15, 8) +#define B_BE_BFMEE_HT_CSI_RATE_MASK GENMASK(7, 0) +#define CSI_INIT_RATE_EHT 0x3 + #define R_BE_RX_FLTR_OPT 0x11420 #define R_BE_RX_FLTR_OPT_C1 0x15420 #define B_BE_UID_FILTER_MASK GENMASK(31, 24) @@ -3646,6 +4028,26 @@ #define B_BE_A_A1_MATCH BIT(1) #define B_BE_SNIFFER_MODE BIT(0) +#define R_BE_CSIRPT_OPTION 0x11464 +#define R_BE_CSIRPT_OPTION_C1 0x15464 +#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26) +#define B_BE_CSIPRT_HESU_AID_EN BIT(25) +#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24) + +#define R_BE_PWR_MODULE 0x11900 +#define R_BE_PWR_MODULE_C1 0x15900 + +#define R_BE_PWR_RATE_OFST_CTRL 0x11A30 +#define R_BE_PWR_BY_RATE 0x11E00 +#define R_BE_PWR_BY_RATE_MAX 0x11FA8 +#define R_BE_PWR_LMT 0x11FAC +#define R_BE_PWR_LMT_MAX 0x12040 +#define R_BE_PWR_RU_LMT 0x12048 +#define R_BE_PWR_RU_LMT_MAX 0x120E4 + +#define CMAC1_START_ADDR_BE 0x14000 +#define CMAC1_END_ADDR_BE 0x17FFF + #define RR_MOD 0x00 #define RR_MOD_V1 0x10000 #define RR_MOD_IQK GENMASK(19, 4) @@ -4413,12 +4815,20 @@ #define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18) #define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14) #define B_FC0_BW_INV GENMASK(6, 0) +#define R_Q_MATRIX_00 0x497C +#define B_Q_MATRIX_00_IMAGINARY GENMASK(15, 0) +#define B_Q_MATRIX_00_REAL GENMASK(31, 16) #define R_CHBW_MOD 0x4978 #define R_CHBW_MOD_V1 0x49C4 #define B_BT_SHARE BIT(14) #define B_CHBW_MOD_SBW GENMASK(13, 12) #define B_CHBW_MOD_PRICH GENMASK(11, 8) #define B_ANT_RX_SEG0 GENMASK(3, 0) +#define R_Q_MATRIX_11 0x4988 +#define B_Q_MATRIX_11_IMAGINARY GENMASK(15, 0) +#define B_Q_MATRIX_11_REAL GENMASK(31, 16) +#define R_CUSTOMIZE_Q_MATRIX 0x498C +#define B_CUSTOMIZE_Q_MATRIX_EN BIT(0) #define R_P0_RPL1 0x49B0 #define B_P0_RPL1_41_MASK GENMASK(31, 24) #define B_P0_RPL1_40_MASK GENMASK(23, 16) @@ -4539,6 +4949,8 @@ #define B_P0_TSSI_ALIM2 GENMASK(29, 0) #define R_P0_TSSI_ALIM4 0x5640 #define R_TSSI_PA_K8 0x5644 +#define R_P0_TSSI_ADC_CLK 0x566c +#define B_P0_TSSI_ADC_CLK GENMASK(17, 16) #define R_UPD_CLK 0x5670 #define B_DAC_VAL BIT(31) #define B_ACK_VAL GENMASK(30, 29) @@ -4619,6 +5031,8 @@ 
#define R_TXGAIN_SCALE 0x58F0 #define B_TXGAIN_SCALE_EN BIT(19) #define B_TXGAIN_SCALE_OFT GENMASK(31, 24) +#define R_P0_DAC_COMP_POST_DPD_EN 0x58F8 +#define B_P0_DAC_COMP_POST_DPD_EN BIT(31) #define R_P0_TSSI_BASE 0x5C00 #define R_S0_DACKI 0x5E00 #define B_S0_DACKI_AR GENMASK(31, 28) @@ -4638,6 +5052,10 @@ #define B_S0_DACKQ7_K GENMASK(15, 8) #define R_S0_DACKQ8 0x5E98 #define B_S0_DACKQ8_K GENMASK(15, 8) +#define R_DCFO_WEIGHT_V1 0x6244 +#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28) +#define R_DCFO_OPT_V1 0x6260 +#define B_DCFO_OPT_EN_V1 BIT(17) #define R_RPL_BIAS_COMP1 0x6DF0 #define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0) #define R_P1_TSSI_ALIM1 0x7630 @@ -4649,6 +5067,8 @@ #define B_P1_TSSI_ALIM31 GENMASK(9, 0) #define R_P1_TSSI_ALIM2 0x763c #define B_P1_TSSI_ALIM2 GENMASK(29, 0) +#define R_P1_TSSI_ADC_CLK 0x766c +#define B_P1_TSSI_ADC_CLK GENMASK(17, 16) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P1_TMETER 0x7810 @@ -4675,6 +5095,8 @@ #define B_P1_TSSI_MV_MIX GENMASK(19, 11) #define B_P1_TSSI_MV_AVG GENMASK(13, 11) #define B_P1_TSSI_MV_CLR BIT(14) +#define R_P1_DAC_COMP_POST_DPD_EN 0x78F8 +#define B_P1_DAC_COMP_POST_DPD_EN BIT(31) #define R_TSSI_THOF 0x7C00 #define R_S1_DACKI 0x7E00 #define B_S1_DACKI_AR GENMASK(31, 28) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 9e2328db18..ca99422e60 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -115,7 +115,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("TH", RTW89_WW, RTW89_WW, RTW89_WW), + COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND), COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), @@ -377,7 +377,7 @@ bottom: return; wiphy->bands[NL80211_BAND_6GHZ] = NULL; - kfree(sband->iftype_data); + kfree((__force void *)sband->iftype_data); kfree(sband); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 103893f28b..50522ff850 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -1704,10 +1704,11 @@ static void rtw8851b_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8851b_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8851b_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8851b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1778,14 +1779,15 @@ rtw8851b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return 0; } -static void rtw8851b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? 
&rtw8851b_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8851b_btc_preagc_en_defs_tbl : &rtw8851b_btc_preagc_dis_defs_tbl); - if (!bt_en) { + if (!en) { if (chan->band_type == RTW89_BAND_2G) { rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1, B_PATH0_G_LNA6_OP1DB_V1, 0x20); @@ -1800,11 +1802,12 @@ static void rtw8851b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) } } -static void rtw8851b_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2280,6 +2283,7 @@ static int rtw8851b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chip_ops rtw8851b_chip_ops = { .enable_bb_rf = rtw8851b_mac_enable_bb_rf, .disable_bb_rf = rtw8851b_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8851b_bb_reset, .bb_sethw = rtw8851b_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2300,9 +2304,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = { .set_txpwr_ctrl = rtw8851b_set_txpwr_ctrl, .init_txpwr_unit = rtw8851b_init_txpwr_unit, .get_thermal = rtw8851b_get_thermal, - .ctrl_btg = rtw8851b_ctrl_btg, + .ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx, .query_ppdu = rtw8851b_query_ppdu, - .bb_ctrl_btc_preagc = rtw8851b_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8851b_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8851b_pwr_on_func, @@ -2345,6 +2349,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .fw_basename = RTW8851B_FW_BASENAME, .fw_format_max = RTW8851B_FW_FORMAT_MAX, .try_ce_fw = true, + .bbmcu_nr = 0, .needed_fw_elms = 0, .fifo_size = 196608, .small_fifo_size = true, @@ -2364,7 +2369,6 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .rf_table = {&rtw89_8851b_phy_radioa_table,}, .nctl_table = &rtw89_8851b_phy_nctl_table, .nctl_post_table = &rtw8851b_nctl_post_defs_tbl, - .byr_table = &rtw89_8851b_byr_table, .dflt_parms = &rtw89_8851b_dflt_parms, .rfe_parms_conf = rtw89_8851b_rfe_parms_conf, .txpwr_factor_rf = 2, @@ -2377,7 +2381,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = true, - .support_ul_tb_ctrl = true, + .ul_tb_waveform_ctrl = true, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 1, .tx_nss = 1, @@ -2419,6 +2424,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8851b_h2c_regs, @@ -2432,6 +2438,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .dcfo_comp_sft = 12, .imr_info = &rtw8851b_imr_info, .rrsr_cfgs = &rtw8851b_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c index c447f91a4b..8cb5bde8f6 100644 --- 
a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c @@ -3246,13 +3246,51 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = { }; static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate[] = { + { 0, 0, 0, 0, 4, 0x50505050, }, + { 0, 0, 1, 0, 4, 0x58585858, }, + { 0, 0, 1, 4, 4, 0x484c5054, }, + { 0, 0, 2, 0, 4, 0x54585858, }, + { 0, 0, 2, 4, 4, 0x44484c50, }, + { 0, 0, 2, 8, 4, 0x34383c40, }, + { 0, 0, 3, 0, 4, 0x58585858, }, + { 0, 1, 2, 0, 4, 0x50545858, }, + { 0, 1, 2, 4, 4, 0x4044484c, }, + { 0, 1, 2, 8, 4, 0x3034383c, }, + { 0, 1, 3, 0, 4, 0x50505050, }, + { 0, 0, 4, 1, 4, 0x00000000, }, + { 0, 0, 4, 0, 1, 0x00000000, }, + { 1, 0, 1, 0, 4, 0x58585858, }, + { 1, 0, 1, 4, 4, 0x484c5054, }, + { 1, 0, 2, 0, 4, 0x54585858, }, + { 1, 0, 2, 4, 4, 0x44484c50, }, + { 1, 0, 2, 8, 4, 0x34383c40, }, + { 1, 0, 3, 0, 4, 0x54585858, }, + { 1, 1, 2, 0, 4, 0x54585858, }, + { 1, 1, 2, 4, 4, 0x44484c50, }, + { 1, 1, 2, 8, 4, 0x34383c40, }, + { 1, 1, 3, 0, 4, 0x48484848, }, + { 1, 0, 4, 0, 4, 0x00000000, }, + { 2, 0, 1, 0, 4, 0x40404040, }, + { 2, 0, 1, 4, 4, 0x383c4040, }, + { 2, 0, 2, 0, 4, 0x40404040, }, + { 2, 0, 2, 4, 4, 0x34383c40, }, + { 2, 0, 2, 8, 4, 0x24282c30, }, + { 2, 0, 3, 0, 4, 0x40404040, }, + { 2, 1, 2, 0, 4, 0x40404040, }, + { 2, 1, 2, 4, 4, 0x34383c40, }, + { 2, 1, 2, 8, 4, 0x24282c30, }, + { 2, 1, 3, 0, 4, 0x40404040, }, + { 2, 0, 4, 0, 4, 0x00000000, }, +}; + +static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate_type2[] = { { 0, 0, 0, 0, 4, 0x50505050, }, { 0, 0, 1, 0, 4, 0x54585858, }, { 0, 0, 1, 4, 4, 0x44484c50, }, { 0, 0, 2, 0, 4, 0x50545858, }, { 0, 0, 2, 4, 4, 0x4044484c, }, { 0, 0, 2, 8, 4, 0x3034383c, }, - { 0, 0, 3, 0, 4, 0x50505050, }, + { 0, 0, 3, 0, 4, 0x58585858, }, { 0, 1, 2, 0, 4, 0x50545858, }, { 0, 1, 2, 4, 4, 0x4044484c, }, { 0, 1, 2, 8, 4, 0x3034383c, }, @@ -3264,7 +3302,7 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate[] = { { 1, 0, 2, 0, 4, 0x54585858, }, { 1, 0, 2, 4, 4, 0x44484c50, }, { 1, 0, 2, 8, 4, 0x34383c40, }, - { 1, 0, 3, 0, 4, 0x40404040, }, + { 1, 0, 3, 0, 4, 0x54585858, }, { 1, 1, 2, 0, 4, 0x54585858, }, { 1, 1, 2, 4, 4, 0x44484c50, }, { 1, 1, 2, 8, 4, 0x34383c40, }, @@ -3321,8 +3359,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4 }; -const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8851b_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CN] = 0, [0][0][RTW89_ETSI] = 0, @@ -3342,13 +3381,33 @@ const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [1][1][RTW89_ACMA] = 0, [1][1][RTW89_CN] = 0, [1][1][RTW89_ETSI] = 0, - [1][1][RTW89_FCC] = 1, - [1][1][RTW89_IC] = 1, + [1][1][RTW89_FCC] = 3, + [1][1][RTW89_IC] = 3, [1][1][RTW89_KCC] = 0, [1][1][RTW89_MKK] = 0, [1][1][RTW89_UK] = 0, }; +static +const u8 rtw89_8851b_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MKK] = 0, + [0][RTW89_UK] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MKK] = 0, + [1][RTW89_UK] = 0, +}; + static const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] @@ -3365,7 
+3424,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_WW][9] = 58, [0][0][0][0][RTW89_WW][10] = 58, [0][0][0][0][RTW89_WW][11] = 58, - [0][0][0][0][RTW89_WW][12] = 52, + [0][0][0][0][RTW89_WW][12] = 50, [0][0][0][0][RTW89_WW][13] = 76, [0][1][0][0][RTW89_WW][0] = 0, [0][1][0][0][RTW89_WW][1] = 0, @@ -3391,7 +3450,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][7] = 58, [1][0][0][0][RTW89_WW][8] = 58, [1][0][0][0][RTW89_WW][9] = 58, - [1][0][0][0][RTW89_WW][10] = 58, + [1][0][0][0][RTW89_WW][10] = 50, [1][0][0][0][RTW89_WW][11] = 0, [1][0][0][0][RTW89_WW][12] = 0, [1][0][0][0][RTW89_WW][13] = 0, @@ -3421,7 +3480,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 60, [0][0][1][0][RTW89_WW][10] = 60, [0][0][1][0][RTW89_WW][11] = 60, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][1] = 0, @@ -3449,7 +3508,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][9] = 60, [0][0][2][0][RTW89_WW][10] = 60, [0][0][2][0][RTW89_WW][11] = 60, - [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][12] = 38, [0][0][2][0][RTW89_WW][13] = 0, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][1] = 0, @@ -3489,7 +3548,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][7] = 58, [1][0][2][0][RTW89_WW][8] = 58, [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][10] = 46, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -3527,7 +3586,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 84, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 58, - [0][0][0][0][RTW89_CN][0] = 60, + [0][0][0][0][RTW89_CN][0] = 58, [0][0][0][0][RTW89_UK][0] = 58, [0][0][0][0][RTW89_FCC][1] = 84, [0][0][0][0][RTW89_ETSI][1] = 58, @@ -3535,7 +3594,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][1] = 84, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 58, - [0][0][0][0][RTW89_CN][1] = 60, + [0][0][0][0][RTW89_CN][1] = 58, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 84, [0][0][0][0][RTW89_ETSI][2] = 58, @@ -3543,7 +3602,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][2] = 84, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 58, - [0][0][0][0][RTW89_CN][2] = 60, + [0][0][0][0][RTW89_CN][2] = 58, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 84, [0][0][0][0][RTW89_ETSI][3] = 58, @@ -3551,7 +3610,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][3] = 84, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 58, - [0][0][0][0][RTW89_CN][3] = 60, + [0][0][0][0][RTW89_CN][3] = 58, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 84, [0][0][0][0][RTW89_ETSI][4] = 58, @@ -3559,7 +3618,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][4] = 84, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 58, - [0][0][0][0][RTW89_CN][4] = 60, + [0][0][0][0][RTW89_CN][4] = 58, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 84, [0][0][0][0][RTW89_ETSI][5] = 58, @@ -3567,7 +3626,7 @@ const s8 
rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][5] = 84, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 58, - [0][0][0][0][RTW89_CN][5] = 60, + [0][0][0][0][RTW89_CN][5] = 58, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 84, [0][0][0][0][RTW89_ETSI][6] = 58, @@ -3575,7 +3634,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][6] = 84, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 58, - [0][0][0][0][RTW89_CN][6] = 60, + [0][0][0][0][RTW89_CN][6] = 58, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 84, [0][0][0][0][RTW89_ETSI][7] = 58, @@ -3583,7 +3642,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][7] = 84, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 58, - [0][0][0][0][RTW89_CN][7] = 60, + [0][0][0][0][RTW89_CN][7] = 58, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 84, [0][0][0][0][RTW89_ETSI][8] = 58, @@ -3591,7 +3650,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][8] = 84, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 58, - [0][0][0][0][RTW89_CN][8] = 60, + [0][0][0][0][RTW89_CN][8] = 58, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 84, [0][0][0][0][RTW89_ETSI][9] = 58, @@ -3599,7 +3658,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][9] = 84, [0][0][0][0][RTW89_KCC][9] = 68, [0][0][0][0][RTW89_ACMA][9] = 58, - [0][0][0][0][RTW89_CN][9] = 60, + [0][0][0][0][RTW89_CN][9] = 58, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 82, [0][0][0][0][RTW89_ETSI][10] = 58, @@ -3607,7 +3666,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][10] = 82, [0][0][0][0][RTW89_KCC][10] = 68, [0][0][0][0][RTW89_ACMA][10] = 58, - [0][0][0][0][RTW89_CN][10] = 60, + [0][0][0][0][RTW89_CN][10] = 58, [0][0][0][0][RTW89_UK][10] = 58, [0][0][0][0][RTW89_FCC][11] = 62, [0][0][0][0][RTW89_ETSI][11] = 58, @@ -3615,7 +3674,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][11] = 62, [0][0][0][0][RTW89_KCC][11] = 68, [0][0][0][0][RTW89_ACMA][11] = 58, - [0][0][0][0][RTW89_CN][11] = 60, + [0][0][0][0][RTW89_CN][11] = 58, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 52, [0][0][0][0][RTW89_ETSI][12] = 58, @@ -3623,7 +3682,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][12] = 52, [0][0][0][0][RTW89_KCC][12] = 68, [0][0][0][0][RTW89_ACMA][12] = 58, - [0][0][0][0][RTW89_CN][12] = 60, + [0][0][0][0][RTW89_CN][12] = 50, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, @@ -3767,7 +3826,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][2] = 127, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 58, - [1][0][0][0][RTW89_CN][2] = 60, + [1][0][0][0][RTW89_CN][2] = 58, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 127, [1][0][0][0][RTW89_ETSI][3] = 58, @@ -3775,7 +3834,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][3] = 127, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 58, - [1][0][0][0][RTW89_CN][3] = 60, + [1][0][0][0][RTW89_CN][3] = 58, [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 127, [1][0][0][0][RTW89_ETSI][4] = 58, @@ -3783,7 
+3842,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][4] = 127, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 58, - [1][0][0][0][RTW89_CN][4] = 60, + [1][0][0][0][RTW89_CN][4] = 58, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 127, [1][0][0][0][RTW89_ETSI][5] = 58, @@ -3791,7 +3850,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][5] = 127, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 58, - [1][0][0][0][RTW89_CN][5] = 60, + [1][0][0][0][RTW89_CN][5] = 58, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 127, [1][0][0][0][RTW89_ETSI][6] = 58, @@ -3799,7 +3858,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][6] = 127, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 58, - [1][0][0][0][RTW89_CN][6] = 60, + [1][0][0][0][RTW89_CN][6] = 58, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 127, [1][0][0][0][RTW89_ETSI][7] = 58, @@ -3807,7 +3866,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][7] = 127, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 58, - [1][0][0][0][RTW89_CN][7] = 60, + [1][0][0][0][RTW89_CN][7] = 58, [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 127, [1][0][0][0][RTW89_ETSI][8] = 58, @@ -3815,7 +3874,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][8] = 127, [1][0][0][0][RTW89_KCC][8] = 68, [1][0][0][0][RTW89_ACMA][8] = 58, - [1][0][0][0][RTW89_CN][8] = 60, + [1][0][0][0][RTW89_CN][8] = 58, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 127, [1][0][0][0][RTW89_ETSI][9] = 58, @@ -3823,7 +3882,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][9] = 127, [1][0][0][0][RTW89_KCC][9] = 68, [1][0][0][0][RTW89_ACMA][9] = 58, - [1][0][0][0][RTW89_CN][9] = 60, + [1][0][0][0][RTW89_CN][9] = 58, [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 127, [1][0][0][0][RTW89_ETSI][10] = 58, @@ -3831,7 +3890,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][10] = 127, [1][0][0][0][RTW89_KCC][10] = 68, [1][0][0][0][RTW89_ACMA][10] = 58, - [1][0][0][0][RTW89_CN][10] = 60, + [1][0][0][0][RTW89_CN][10] = 50, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, @@ -4071,7 +4130,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][12] = 64, [0][0][1][0][RTW89_KCC][12] = 74, [0][0][1][0][RTW89_ACMA][12] = 58, - [0][0][1][0][RTW89_CN][12] = 60, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, @@ -4295,7 +4354,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 70, [0][0][2][0][RTW89_KCC][12] = 78, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 60, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, @@ -4551,7 +4610,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][2] = 72, [1][0][2][0][RTW89_KCC][2] = 80, [1][0][2][0][RTW89_ACMA][2] = 58, - [1][0][2][0][RTW89_CN][2] = 60, + [1][0][2][0][RTW89_CN][2] = 58, [1][0][2][0][RTW89_UK][2] = 58, [1][0][2][0][RTW89_FCC][3] = 72, 
[1][0][2][0][RTW89_ETSI][3] = 58, @@ -4559,7 +4618,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][3] = 72, [1][0][2][0][RTW89_KCC][3] = 80, [1][0][2][0][RTW89_ACMA][3] = 58, - [1][0][2][0][RTW89_CN][3] = 60, + [1][0][2][0][RTW89_CN][3] = 58, [1][0][2][0][RTW89_UK][3] = 58, [1][0][2][0][RTW89_FCC][4] = 76, [1][0][2][0][RTW89_ETSI][4] = 58, @@ -4567,7 +4626,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][4] = 76, [1][0][2][0][RTW89_KCC][4] = 80, [1][0][2][0][RTW89_ACMA][4] = 58, - [1][0][2][0][RTW89_CN][4] = 60, + [1][0][2][0][RTW89_CN][4] = 58, [1][0][2][0][RTW89_UK][4] = 58, [1][0][2][0][RTW89_FCC][5] = 78, [1][0][2][0][RTW89_ETSI][5] = 58, @@ -4575,7 +4634,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 78, [1][0][2][0][RTW89_KCC][5] = 80, [1][0][2][0][RTW89_ACMA][5] = 58, - [1][0][2][0][RTW89_CN][5] = 60, + [1][0][2][0][RTW89_CN][5] = 58, [1][0][2][0][RTW89_UK][5] = 58, [1][0][2][0][RTW89_FCC][6] = 78, [1][0][2][0][RTW89_ETSI][6] = 58, @@ -4583,7 +4642,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][6] = 78, [1][0][2][0][RTW89_KCC][6] = 80, [1][0][2][0][RTW89_ACMA][6] = 58, - [1][0][2][0][RTW89_CN][6] = 60, + [1][0][2][0][RTW89_CN][6] = 58, [1][0][2][0][RTW89_UK][6] = 58, [1][0][2][0][RTW89_FCC][7] = 78, [1][0][2][0][RTW89_ETSI][7] = 58, @@ -4591,7 +4650,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][7] = 78, [1][0][2][0][RTW89_KCC][7] = 80, [1][0][2][0][RTW89_ACMA][7] = 58, - [1][0][2][0][RTW89_CN][7] = 60, + [1][0][2][0][RTW89_CN][7] = 58, [1][0][2][0][RTW89_UK][7] = 58, [1][0][2][0][RTW89_FCC][8] = 78, [1][0][2][0][RTW89_ETSI][8] = 58, @@ -4599,7 +4658,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][8] = 78, [1][0][2][0][RTW89_KCC][8] = 78, [1][0][2][0][RTW89_ACMA][8] = 58, - [1][0][2][0][RTW89_CN][8] = 60, + [1][0][2][0][RTW89_CN][8] = 58, [1][0][2][0][RTW89_UK][8] = 58, [1][0][2][0][RTW89_FCC][9] = 76, [1][0][2][0][RTW89_ETSI][9] = 58, @@ -4607,7 +4666,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 76, [1][0][2][0][RTW89_KCC][9] = 78, [1][0][2][0][RTW89_ACMA][9] = 58, - [1][0][2][0][RTW89_CN][9] = 60, + [1][0][2][0][RTW89_CN][9] = 58, [1][0][2][0][RTW89_UK][9] = 58, [1][0][2][0][RTW89_FCC][10] = 70, [1][0][2][0][RTW89_ETSI][10] = 58, @@ -4615,7 +4674,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][10] = 70, [1][0][2][0][RTW89_KCC][10] = 78, [1][0][2][0][RTW89_ACMA][10] = 58, - [1][0][2][0][RTW89_CN][10] = 60, + [1][0][2][0][RTW89_CN][10] = 46, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, @@ -4896,9 +4955,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][42] = 30, [0][0][1][0][RTW89_WW][44] = 30, [0][0][1][0][RTW89_WW][46] = 30, - [0][0][1][0][RTW89_WW][48] = 68, - [0][0][1][0][RTW89_WW][50] = 68, - [0][0][1][0][RTW89_WW][52] = 68, + [0][0][1][0][RTW89_WW][48] = 72, + [0][0][1][0][RTW89_WW][50] = 72, + [0][0][1][0][RTW89_WW][52] = 72, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][2] = 0, [0][1][1][0][RTW89_WW][4] = 0, @@ -4927,14 +4986,14 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][48] = 0, [0][1][1][0][RTW89_WW][50] = 0, [0][1][1][0][RTW89_WW][52] = 0, 
- [0][0][2][0][RTW89_WW][0] = 62, - [0][0][2][0][RTW89_WW][2] = 62, - [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][4] = 60, [0][0][2][0][RTW89_WW][6] = 54, - [0][0][2][0][RTW89_WW][8] = 62, - [0][0][2][0][RTW89_WW][10] = 62, - [0][0][2][0][RTW89_WW][12] = 62, - [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][8] = 60, + [0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][14] = 60, [0][0][2][0][RTW89_WW][15] = 60, [0][0][2][0][RTW89_WW][17] = 62, [0][0][2][0][RTW89_WW][19] = 62, @@ -4952,9 +5011,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][42] = 30, [0][0][2][0][RTW89_WW][44] = 30, [0][0][2][0][RTW89_WW][46] = 30, - [0][0][2][0][RTW89_WW][48] = 70, - [0][0][2][0][RTW89_WW][50] = 72, - [0][0][2][0][RTW89_WW][52] = 72, + [0][0][2][0][RTW89_WW][48] = 74, + [0][0][2][0][RTW89_WW][50] = 76, + [0][0][2][0][RTW89_WW][52] = 76, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][2] = 0, [0][1][2][0][RTW89_WW][4] = 0, @@ -5011,11 +5070,11 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][48] = 0, [0][1][2][1][RTW89_WW][50] = 0, [0][1][2][1][RTW89_WW][52] = 0, - [1][0][2][0][RTW89_WW][1] = 60, + [1][0][2][0][RTW89_WW][1] = 62, [1][0][2][0][RTW89_WW][5] = 62, - [1][0][2][0][RTW89_WW][9] = 64, - [1][0][2][0][RTW89_WW][13] = 60, - [1][0][2][0][RTW89_WW][16] = 62, + [1][0][2][0][RTW89_WW][9] = 62, + [1][0][2][0][RTW89_WW][13] = 62, + [1][0][2][0][RTW89_WW][16] = 66, [1][0][2][0][RTW89_WW][20] = 66, [1][0][2][0][RTW89_WW][24] = 66, [1][0][2][0][RTW89_WW][28] = 66, @@ -5023,8 +5082,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][36] = 76, [1][0][2][0][RTW89_WW][39] = 30, [1][0][2][0][RTW89_WW][43] = 30, - [1][0][2][0][RTW89_WW][47] = 80, - [1][0][2][0][RTW89_WW][51] = 80, + [1][0][2][0][RTW89_WW][47] = 84, + [1][0][2][0][RTW89_WW][51] = 84, [1][1][2][0][RTW89_WW][1] = 0, [1][1][2][0][RTW89_WW][5] = 0, [1][1][2][0][RTW89_WW][9] = 0, @@ -5054,12 +5113,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][47] = 0, [1][1][2][1][RTW89_WW][51] = 0, [2][0][2][0][RTW89_WW][3] = 60, - [2][0][2][0][RTW89_WW][11] = 58, - [2][0][2][0][RTW89_WW][18] = 62, + [2][0][2][0][RTW89_WW][11] = 56, + [2][0][2][0][RTW89_WW][18] = 64, [2][0][2][0][RTW89_WW][26] = 64, [2][0][2][0][RTW89_WW][34] = 72, [2][0][2][0][RTW89_WW][41] = 30, - [2][0][2][0][RTW89_WW][49] = 70, + [2][0][2][0][RTW89_WW][49] = 74, [2][1][2][0][RTW89_WW][3] = 0, [2][1][2][0][RTW89_WW][11] = 0, [2][1][2][0][RTW89_WW][18] = 0, @@ -5074,8 +5133,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_WW][34] = 0, [2][1][2][1][RTW89_WW][41] = 0, [2][1][2][1][RTW89_WW][49] = 0, - [3][0][2][0][RTW89_WW][7] = 58, - [3][0][2][0][RTW89_WW][22] = 58, + [3][0][2][0][RTW89_WW][7] = 0, + [3][0][2][0][RTW89_WW][22] = 0, [3][0][2][0][RTW89_WW][45] = 0, [3][1][2][0][RTW89_WW][7] = 0, [3][1][2][0][RTW89_WW][22] = 0, @@ -5083,7 +5142,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_WW][7] = 0, [3][1][2][1][RTW89_WW][22] = 0, [3][1][2][1][RTW89_WW][45] = 0, - [0][0][1][0][RTW89_FCC][0] = 76, + [0][0][1][0][RTW89_FCC][0] = 80, [0][0][1][0][RTW89_ETSI][0] = 58, [0][0][1][0][RTW89_MKK][0] = 60, [0][0][1][0][RTW89_IC][0] = 62, @@ -5139,7 +5198,7 @@ const s8 
rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][12] = 58, [0][0][1][0][RTW89_CN][12] = 60, [0][0][1][0][RTW89_UK][12] = 58, - [0][0][1][0][RTW89_FCC][14] = 74, + [0][0][1][0][RTW89_FCC][14] = 78, [0][0][1][0][RTW89_ETSI][14] = 58, [0][0][1][0][RTW89_MKK][14] = 60, [0][0][1][0][RTW89_IC][14] = 64, @@ -5147,10 +5206,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][14] = 58, [0][0][1][0][RTW89_CN][14] = 60, [0][0][1][0][RTW89_UK][14] = 58, - [0][0][1][0][RTW89_FCC][15] = 74, + [0][0][1][0][RTW89_FCC][15] = 78, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 78, - [0][0][1][0][RTW89_IC][15] = 74, + [0][0][1][0][RTW89_IC][15] = 78, [0][0][1][0][RTW89_KCC][15] = 78, [0][0][1][0][RTW89_ACMA][15] = 58, [0][0][1][0][RTW89_CN][15] = 127, @@ -5227,10 +5286,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][33] = 60, [0][0][1][0][RTW89_CN][33] = 127, [0][0][1][0][RTW89_UK][33] = 60, - [0][0][1][0][RTW89_FCC][35] = 68, + [0][0][1][0][RTW89_FCC][35] = 72, [0][0][1][0][RTW89_ETSI][35] = 60, [0][0][1][0][RTW89_MKK][35] = 78, - [0][0][1][0][RTW89_IC][35] = 68, + [0][0][1][0][RTW89_IC][35] = 72, [0][0][1][0][RTW89_KCC][35] = 74, [0][0][1][0][RTW89_ACMA][35] = 60, [0][0][1][0][RTW89_CN][35] = 127, @@ -5249,7 +5308,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][38] = 82, [0][0][1][0][RTW89_KCC][38] = 70, [0][0][1][0][RTW89_ACMA][38] = 78, - [0][0][1][0][RTW89_CN][38] = 78, + [0][0][1][0][RTW89_CN][38] = 74, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 82, [0][0][1][0][RTW89_ETSI][40] = 30, @@ -5257,7 +5316,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][40] = 82, [0][0][1][0][RTW89_KCC][40] = 76, [0][0][1][0][RTW89_ACMA][40] = 78, - [0][0][1][0][RTW89_CN][40] = 78, + [0][0][1][0][RTW89_CN][40] = 74, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 82, [0][0][1][0][RTW89_ETSI][42] = 30, @@ -5265,7 +5324,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][42] = 82, [0][0][1][0][RTW89_KCC][42] = 76, [0][0][1][0][RTW89_ACMA][42] = 78, - [0][0][1][0][RTW89_CN][42] = 78, + [0][0][1][0][RTW89_CN][42] = 74, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 82, [0][0][1][0][RTW89_ETSI][44] = 30, @@ -5273,7 +5332,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][44] = 82, [0][0][1][0][RTW89_KCC][44] = 76, [0][0][1][0][RTW89_ACMA][44] = 78, - [0][0][1][0][RTW89_CN][44] = 78, + [0][0][1][0][RTW89_CN][44] = 58, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 82, [0][0][1][0][RTW89_ETSI][46] = 30, @@ -5281,9 +5340,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][46] = 82, [0][0][1][0][RTW89_KCC][46] = 76, [0][0][1][0][RTW89_ACMA][46] = 78, - [0][0][1][0][RTW89_CN][46] = 78, + [0][0][1][0][RTW89_CN][46] = 58, [0][0][1][0][RTW89_UK][46] = 58, - [0][0][1][0][RTW89_FCC][48] = 68, + [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, [0][0][1][0][RTW89_IC][48] = 127, @@ -5291,7 +5350,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][48] = 127, [0][0][1][0][RTW89_CN][48] = 127, [0][0][1][0][RTW89_UK][48] = 127, - [0][0][1][0][RTW89_FCC][50] = 68, + [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] 
= 127, [0][0][1][0][RTW89_MKK][50] = 127, [0][0][1][0][RTW89_IC][50] = 127, @@ -5299,7 +5358,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][50] = 127, [0][0][1][0][RTW89_CN][50] = 127, [0][0][1][0][RTW89_UK][50] = 127, - [0][0][1][0][RTW89_FCC][52] = 68, + [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, [0][0][1][0][RTW89_IC][52] = 127, @@ -5531,13 +5590,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_ACMA][52] = 127, [0][1][1][0][RTW89_CN][52] = 127, [0][1][1][0][RTW89_UK][52] = 127, - [0][0][2][0][RTW89_FCC][0] = 74, + [0][0][2][0][RTW89_FCC][0] = 78, [0][0][2][0][RTW89_ETSI][0] = 62, [0][0][2][0][RTW89_MKK][0] = 62, [0][0][2][0][RTW89_IC][0] = 64, [0][0][2][0][RTW89_KCC][0] = 76, [0][0][2][0][RTW89_ACMA][0] = 62, - [0][0][2][0][RTW89_CN][0] = 62, + [0][0][2][0][RTW89_CN][0] = 60, [0][0][2][0][RTW89_UK][0] = 62, [0][0][2][0][RTW89_FCC][2] = 82, [0][0][2][0][RTW89_ETSI][2] = 62, @@ -5545,7 +5604,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 64, [0][0][2][0][RTW89_KCC][2] = 76, [0][0][2][0][RTW89_ACMA][2] = 62, - [0][0][2][0][RTW89_CN][2] = 62, + [0][0][2][0][RTW89_CN][2] = 60, [0][0][2][0][RTW89_UK][2] = 62, [0][0][2][0][RTW89_FCC][4] = 82, [0][0][2][0][RTW89_ETSI][4] = 62, @@ -5553,7 +5612,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 64, [0][0][2][0][RTW89_KCC][4] = 76, [0][0][2][0][RTW89_ACMA][4] = 62, - [0][0][2][0][RTW89_CN][4] = 62, + [0][0][2][0][RTW89_CN][4] = 60, [0][0][2][0][RTW89_UK][4] = 62, [0][0][2][0][RTW89_FCC][6] = 82, [0][0][2][0][RTW89_ETSI][6] = 62, @@ -5561,7 +5620,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 64, [0][0][2][0][RTW89_KCC][6] = 54, [0][0][2][0][RTW89_ACMA][6] = 62, - [0][0][2][0][RTW89_CN][6] = 62, + [0][0][2][0][RTW89_CN][6] = 60, [0][0][2][0][RTW89_UK][6] = 62, [0][0][2][0][RTW89_FCC][8] = 82, [0][0][2][0][RTW89_ETSI][8] = 62, @@ -5569,7 +5628,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 64, [0][0][2][0][RTW89_KCC][8] = 76, [0][0][2][0][RTW89_ACMA][8] = 62, - [0][0][2][0][RTW89_CN][8] = 62, + [0][0][2][0][RTW89_CN][8] = 60, [0][0][2][0][RTW89_UK][8] = 62, [0][0][2][0][RTW89_FCC][10] = 82, [0][0][2][0][RTW89_ETSI][10] = 62, @@ -5577,7 +5636,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 64, [0][0][2][0][RTW89_KCC][10] = 76, [0][0][2][0][RTW89_ACMA][10] = 62, - [0][0][2][0][RTW89_CN][10] = 62, + [0][0][2][0][RTW89_CN][10] = 60, [0][0][2][0][RTW89_UK][10] = 62, [0][0][2][0][RTW89_FCC][12] = 82, [0][0][2][0][RTW89_ETSI][12] = 62, @@ -5585,20 +5644,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 64, [0][0][2][0][RTW89_KCC][12] = 78, [0][0][2][0][RTW89_ACMA][12] = 62, - [0][0][2][0][RTW89_CN][12] = 62, + [0][0][2][0][RTW89_CN][12] = 60, [0][0][2][0][RTW89_UK][12] = 62, - [0][0][2][0][RTW89_FCC][14] = 72, + [0][0][2][0][RTW89_FCC][14] = 76, [0][0][2][0][RTW89_ETSI][14] = 62, [0][0][2][0][RTW89_MKK][14] = 62, [0][0][2][0][RTW89_IC][14] = 64, [0][0][2][0][RTW89_KCC][14] = 78, [0][0][2][0][RTW89_ACMA][14] = 62, - [0][0][2][0][RTW89_CN][14] = 62, + [0][0][2][0][RTW89_CN][14] = 60, [0][0][2][0][RTW89_UK][14] = 62, - [0][0][2][0][RTW89_FCC][15] = 72, + [0][0][2][0][RTW89_FCC][15] = 76, 
[0][0][2][0][RTW89_ETSI][15] = 60, [0][0][2][0][RTW89_MKK][15] = 78, - [0][0][2][0][RTW89_IC][15] = 72, + [0][0][2][0][RTW89_IC][15] = 76, [0][0][2][0][RTW89_KCC][15] = 78, [0][0][2][0][RTW89_ACMA][15] = 60, [0][0][2][0][RTW89_CN][15] = 127, @@ -5675,10 +5734,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][33] = 62, [0][0][2][0][RTW89_CN][33] = 127, [0][0][2][0][RTW89_UK][33] = 62, - [0][0][2][0][RTW89_FCC][35] = 68, + [0][0][2][0][RTW89_FCC][35] = 72, [0][0][2][0][RTW89_ETSI][35] = 62, [0][0][2][0][RTW89_MKK][35] = 78, - [0][0][2][0][RTW89_IC][35] = 68, + [0][0][2][0][RTW89_IC][35] = 72, [0][0][2][0][RTW89_KCC][35] = 74, [0][0][2][0][RTW89_ACMA][35] = 62, [0][0][2][0][RTW89_CN][35] = 127, @@ -5697,7 +5756,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][38] = 82, [0][0][2][0][RTW89_KCC][38] = 66, [0][0][2][0][RTW89_ACMA][38] = 78, - [0][0][2][0][RTW89_CN][38] = 78, + [0][0][2][0][RTW89_CN][38] = 70, [0][0][2][0][RTW89_UK][38] = 60, [0][0][2][0][RTW89_FCC][40] = 82, [0][0][2][0][RTW89_ETSI][40] = 30, @@ -5705,7 +5764,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][40] = 82, [0][0][2][0][RTW89_KCC][40] = 74, [0][0][2][0][RTW89_ACMA][40] = 78, - [0][0][2][0][RTW89_CN][40] = 78, + [0][0][2][0][RTW89_CN][40] = 70, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 82, [0][0][2][0][RTW89_ETSI][42] = 30, @@ -5713,7 +5772,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][42] = 82, [0][0][2][0][RTW89_KCC][42] = 74, [0][0][2][0][RTW89_ACMA][42] = 78, - [0][0][2][0][RTW89_CN][42] = 78, + [0][0][2][0][RTW89_CN][42] = 70, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 82, [0][0][2][0][RTW89_ETSI][44] = 30, @@ -5721,7 +5780,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][44] = 82, [0][0][2][0][RTW89_KCC][44] = 74, [0][0][2][0][RTW89_ACMA][44] = 78, - [0][0][2][0][RTW89_CN][44] = 78, + [0][0][2][0][RTW89_CN][44] = 58, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 82, [0][0][2][0][RTW89_ETSI][46] = 30, @@ -5729,9 +5788,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][46] = 82, [0][0][2][0][RTW89_KCC][46] = 74, [0][0][2][0][RTW89_ACMA][46] = 78, - [0][0][2][0][RTW89_CN][46] = 78, + [0][0][2][0][RTW89_CN][46] = 58, [0][0][2][0][RTW89_UK][46] = 60, - [0][0][2][0][RTW89_FCC][48] = 70, + [0][0][2][0][RTW89_FCC][48] = 74, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, [0][0][2][0][RTW89_IC][48] = 127, @@ -5739,7 +5798,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][48] = 127, [0][0][2][0][RTW89_CN][48] = 127, [0][0][2][0][RTW89_UK][48] = 127, - [0][0][2][0][RTW89_FCC][50] = 72, + [0][0][2][0][RTW89_FCC][50] = 76, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, [0][0][2][0][RTW89_IC][50] = 127, @@ -5747,7 +5806,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][50] = 127, [0][0][2][0][RTW89_CN][50] = 127, [0][0][2][0][RTW89_UK][50] = 127, - [0][0][2][0][RTW89_FCC][52] = 72, + [0][0][2][0][RTW89_FCC][52] = 76, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, [0][0][2][0][RTW89_IC][52] = 127, @@ -6203,13 +6262,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_ACMA][52] = 127, 
[0][1][2][1][RTW89_CN][52] = 127, [0][1][2][1][RTW89_UK][52] = 127, - [1][0][2][0][RTW89_FCC][1] = 64, + [1][0][2][0][RTW89_FCC][1] = 68, [1][0][2][0][RTW89_ETSI][1] = 64, [1][0][2][0][RTW89_MKK][1] = 64, - [1][0][2][0][RTW89_IC][1] = 60, + [1][0][2][0][RTW89_IC][1] = 64, [1][0][2][0][RTW89_KCC][1] = 74, [1][0][2][0][RTW89_ACMA][1] = 64, - [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_CN][1] = 62, [1][0][2][0][RTW89_UK][1] = 64, [1][0][2][0][RTW89_FCC][5] = 82, [1][0][2][0][RTW89_ETSI][5] = 64, @@ -6217,7 +6276,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 64, [1][0][2][0][RTW89_KCC][5] = 66, [1][0][2][0][RTW89_ACMA][5] = 64, - [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_CN][5] = 62, [1][0][2][0][RTW89_UK][5] = 64, [1][0][2][0][RTW89_FCC][9] = 82, [1][0][2][0][RTW89_ETSI][9] = 64, @@ -6225,20 +6284,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 64, [1][0][2][0][RTW89_KCC][9] = 78, [1][0][2][0][RTW89_ACMA][9] = 64, - [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_CN][9] = 62, [1][0][2][0][RTW89_UK][9] = 64, - [1][0][2][0][RTW89_FCC][13] = 62, + [1][0][2][0][RTW89_FCC][13] = 66, [1][0][2][0][RTW89_ETSI][13] = 64, [1][0][2][0][RTW89_MKK][13] = 64, - [1][0][2][0][RTW89_IC][13] = 60, + [1][0][2][0][RTW89_IC][13] = 64, [1][0][2][0][RTW89_KCC][13] = 72, [1][0][2][0][RTW89_ACMA][13] = 64, - [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_CN][13] = 62, [1][0][2][0][RTW89_UK][13] = 64, - [1][0][2][0][RTW89_FCC][16] = 62, + [1][0][2][0][RTW89_FCC][16] = 66, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 80, - [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_IC][16] = 66, [1][0][2][0][RTW89_KCC][16] = 74, [1][0][2][0][RTW89_ACMA][16] = 66, [1][0][2][0][RTW89_CN][16] = 127, @@ -6246,7 +6305,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][20] = 80, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 80, - [1][0][2][0][RTW89_IC][20] = 76, + [1][0][2][0][RTW89_IC][20] = 80, [1][0][2][0][RTW89_KCC][20] = 74, [1][0][2][0][RTW89_ACMA][20] = 66, [1][0][2][0][RTW89_CN][20] = 127, @@ -6267,10 +6326,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][28] = 127, [1][0][2][0][RTW89_CN][28] = 127, [1][0][2][0][RTW89_UK][28] = 66, - [1][0][2][0][RTW89_FCC][32] = 72, + [1][0][2][0][RTW89_FCC][32] = 76, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 80, - [1][0][2][0][RTW89_IC][32] = 72, + [1][0][2][0][RTW89_IC][32] = 76, [1][0][2][0][RTW89_KCC][32] = 78, [1][0][2][0][RTW89_ACMA][32] = 66, [1][0][2][0][RTW89_CN][32] = 127, @@ -6286,10 +6345,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][39] = 84, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, - [1][0][2][0][RTW89_IC][39] = 80, + [1][0][2][0][RTW89_IC][39] = 84, [1][0][2][0][RTW89_KCC][39] = 68, [1][0][2][0][RTW89_ACMA][39] = 80, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 60, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 84, [1][0][2][0][RTW89_ETSI][43] = 30, @@ -6297,9 +6356,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][43] = 84, [1][0][2][0][RTW89_KCC][43] = 78, [1][0][2][0][RTW89_ACMA][43] = 80, - [1][0][2][0][RTW89_CN][43] = 80, + [1][0][2][0][RTW89_CN][43] = 62, [1][0][2][0][RTW89_UK][43] = 64, - [1][0][2][0][RTW89_FCC][47] = 80, + 
[1][0][2][0][RTW89_FCC][47] = 84, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, [1][0][2][0][RTW89_IC][47] = 127, @@ -6307,7 +6366,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][47] = 127, [1][0][2][0][RTW89_CN][47] = 127, [1][0][2][0][RTW89_UK][47] = 127, - [1][0][2][0][RTW89_FCC][51] = 80, + [1][0][2][0][RTW89_FCC][51] = 84, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, [1][0][2][0][RTW89_IC][51] = 127, @@ -6539,26 +6598,26 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_ACMA][51] = 127, [1][1][2][1][RTW89_CN][51] = 127, [1][1][2][1][RTW89_UK][51] = 127, - [2][0][2][0][RTW89_FCC][3] = 72, + [2][0][2][0][RTW89_FCC][3] = 76, [2][0][2][0][RTW89_ETSI][3] = 64, [2][0][2][0][RTW89_MKK][3] = 62, - [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_IC][3] = 64, [2][0][2][0][RTW89_KCC][3] = 72, [2][0][2][0][RTW89_ACMA][3] = 64, - [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_CN][3] = 60, [2][0][2][0][RTW89_UK][3] = 64, - [2][0][2][0][RTW89_FCC][11] = 60, + [2][0][2][0][RTW89_FCC][11] = 64, [2][0][2][0][RTW89_ETSI][11] = 64, [2][0][2][0][RTW89_MKK][11] = 64, - [2][0][2][0][RTW89_IC][11] = 58, + [2][0][2][0][RTW89_IC][11] = 62, [2][0][2][0][RTW89_KCC][11] = 72, [2][0][2][0][RTW89_ACMA][11] = 64, - [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_CN][11] = 56, [2][0][2][0][RTW89_UK][11] = 64, - [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_FCC][18] = 66, [2][0][2][0][RTW89_ETSI][18] = 64, [2][0][2][0][RTW89_MKK][18] = 72, - [2][0][2][0][RTW89_IC][18] = 62, + [2][0][2][0][RTW89_IC][18] = 66, [2][0][2][0][RTW89_KCC][18] = 72, [2][0][2][0][RTW89_ACMA][18] = 64, [2][0][2][0][RTW89_CN][18] = 127, @@ -6574,7 +6633,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][34] = 76, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 72, - [2][0][2][0][RTW89_IC][34] = 72, + [2][0][2][0][RTW89_IC][34] = 76, [2][0][2][0][RTW89_KCC][34] = 72, [2][0][2][0][RTW89_ACMA][34] = 72, [2][0][2][0][RTW89_CN][34] = 127, @@ -6582,12 +6641,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][41] = 76, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, - [2][0][2][0][RTW89_IC][41] = 72, + [2][0][2][0][RTW89_IC][41] = 76, [2][0][2][0][RTW89_KCC][41] = 64, [2][0][2][0][RTW89_ACMA][41] = 72, - [2][0][2][0][RTW89_CN][41] = 72, + [2][0][2][0][RTW89_CN][41] = 40, [2][0][2][0][RTW89_UK][41] = 64, - [2][0][2][0][RTW89_FCC][49] = 70, + [2][0][2][0][RTW89_FCC][49] = 74, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, [2][0][2][0][RTW89_IC][49] = 127, @@ -6713,7 +6772,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][7] = 127, [3][0][2][0][RTW89_KCC][7] = 127, [3][0][2][0][RTW89_ACMA][7] = 127, - [3][0][2][0][RTW89_CN][7] = 58, + [3][0][2][0][RTW89_CN][7] = 127, [3][0][2][0][RTW89_UK][7] = 127, [3][0][2][0][RTW89_FCC][22] = 127, [3][0][2][0][RTW89_ETSI][22] = 127, @@ -6721,7 +6780,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 127, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 58, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_FCC][45] = 127, [3][0][2][0][RTW89_ETSI][45] = 127, @@ -6798,19 +6857,19 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][11] = 30, [0][0][RTW89_WW][12] = 30, [0][0][RTW89_WW][13] = 0, - [0][1][RTW89_WW][0] = 20, - [0][1][RTW89_WW][1] = 22, - [0][1][RTW89_WW][2] = 22, - [0][1][RTW89_WW][3] = 22, - [0][1][RTW89_WW][4] = 22, - [0][1][RTW89_WW][5] = 22, - [0][1][RTW89_WW][6] = 22, - [0][1][RTW89_WW][7] = 22, - [0][1][RTW89_WW][8] = 22, - [0][1][RTW89_WW][9] = 22, - [0][1][RTW89_WW][10] = 22, - [0][1][RTW89_WW][11] = 22, - [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][1] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][3] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][5] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][7] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][9] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][11] = 0, + [0][1][RTW89_WW][12] = 0, [0][1][RTW89_WW][13] = 0, [1][0][RTW89_WW][0] = 42, [1][0][RTW89_WW][1] = 42, @@ -6826,19 +6885,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][11] = 42, [1][0][RTW89_WW][12] = 34, [1][0][RTW89_WW][13] = 0, - [1][1][RTW89_WW][0] = 32, - [1][1][RTW89_WW][1] = 32, - [1][1][RTW89_WW][2] = 32, - [1][1][RTW89_WW][3] = 32, - [1][1][RTW89_WW][4] = 32, - [1][1][RTW89_WW][5] = 32, - [1][1][RTW89_WW][6] = 32, - [1][1][RTW89_WW][7] = 32, - [1][1][RTW89_WW][8] = 32, - [1][1][RTW89_WW][9] = 32, - [1][1][RTW89_WW][10] = 32, - [1][1][RTW89_WW][11] = 32, - [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][1] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][3] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][5] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][7] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][9] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][11] = 0, + [1][1][RTW89_WW][12] = 0, [1][1][RTW89_WW][13] = 0, [2][0][RTW89_WW][0] = 54, [2][0][RTW89_WW][1] = 54, @@ -6854,19 +6913,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][11] = 54, [2][0][RTW89_WW][12] = 34, [2][0][RTW89_WW][13] = 0, - [2][1][RTW89_WW][0] = 44, - [2][1][RTW89_WW][1] = 44, - [2][1][RTW89_WW][2] = 44, - [2][1][RTW89_WW][3] = 44, - [2][1][RTW89_WW][4] = 44, - [2][1][RTW89_WW][5] = 44, - [2][1][RTW89_WW][6] = 44, - [2][1][RTW89_WW][7] = 44, - [2][1][RTW89_WW][8] = 44, - [2][1][RTW89_WW][9] = 44, - [2][1][RTW89_WW][10] = 44, - [2][1][RTW89_WW][11] = 44, - [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][1] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][3] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][5] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][7] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][9] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][11] = 0, + [2][1][RTW89_WW][12] = 0, [2][1][RTW89_WW][13] = 0, [0][0][RTW89_FCC][0] = 62, [0][0][RTW89_ETSI][0] = 30, @@ -6986,7 +7045,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][1] = 127, [0][1][RTW89_ETSI][1] = 127, @@ -6994,7 +7053,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][1] = 127, [0][1][RTW89_KCC][1] = 127, [0][1][RTW89_ACMA][1] = 127, - [0][1][RTW89_CN][1] = 22, + [0][1][RTW89_CN][1] = 127, [0][1][RTW89_UK][1] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -7002,7 +7061,7 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 22, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][3] = 127, [0][1][RTW89_ETSI][3] = 127, @@ -7010,7 +7069,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][3] = 127, [0][1][RTW89_KCC][3] = 127, [0][1][RTW89_ACMA][3] = 127, - [0][1][RTW89_CN][3] = 22, + [0][1][RTW89_CN][3] = 127, [0][1][RTW89_UK][3] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -7018,7 +7077,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 22, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][5] = 127, [0][1][RTW89_ETSI][5] = 127, @@ -7026,7 +7085,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][5] = 127, [0][1][RTW89_KCC][5] = 127, [0][1][RTW89_ACMA][5] = 127, - [0][1][RTW89_CN][5] = 22, + [0][1][RTW89_CN][5] = 127, [0][1][RTW89_UK][5] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -7034,7 +7093,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 22, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][7] = 127, [0][1][RTW89_ETSI][7] = 127, @@ -7042,7 +7101,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][7] = 127, [0][1][RTW89_KCC][7] = 127, [0][1][RTW89_ACMA][7] = 127, - [0][1][RTW89_CN][7] = 22, + [0][1][RTW89_CN][7] = 127, [0][1][RTW89_UK][7] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -7050,7 +7109,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 22, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][9] = 127, [0][1][RTW89_ETSI][9] = 127, @@ -7058,7 +7117,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][9] = 127, [0][1][RTW89_KCC][9] = 127, [0][1][RTW89_ACMA][9] = 127, - [0][1][RTW89_CN][9] = 22, + [0][1][RTW89_CN][9] = 127, [0][1][RTW89_UK][9] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -7066,7 +7125,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 22, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][11] = 127, [0][1][RTW89_ETSI][11] = 127, @@ -7074,7 +7133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][11] = 127, [0][1][RTW89_KCC][11] = 127, [0][1][RTW89_ACMA][11] = 127, - [0][1][RTW89_CN][11] = 22, + [0][1][RTW89_CN][11] = 127, [0][1][RTW89_UK][11] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -7082,7 +7141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, @@ -7210,7 +7269,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, 
[1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 32, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][1] = 127, [1][1][RTW89_ETSI][1] = 127, @@ -7218,7 +7277,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][1] = 127, [1][1][RTW89_KCC][1] = 127, [1][1][RTW89_ACMA][1] = 127, - [1][1][RTW89_CN][1] = 32, + [1][1][RTW89_CN][1] = 127, [1][1][RTW89_UK][1] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -7226,7 +7285,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 32, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][3] = 127, [1][1][RTW89_ETSI][3] = 127, @@ -7234,7 +7293,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][3] = 127, [1][1][RTW89_KCC][3] = 127, [1][1][RTW89_ACMA][3] = 127, - [1][1][RTW89_CN][3] = 32, + [1][1][RTW89_CN][3] = 127, [1][1][RTW89_UK][3] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -7242,7 +7301,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 32, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][5] = 127, [1][1][RTW89_ETSI][5] = 127, @@ -7250,7 +7309,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][5] = 127, [1][1][RTW89_KCC][5] = 127, [1][1][RTW89_ACMA][5] = 127, - [1][1][RTW89_CN][5] = 32, + [1][1][RTW89_CN][5] = 127, [1][1][RTW89_UK][5] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -7258,7 +7317,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 32, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][7] = 127, [1][1][RTW89_ETSI][7] = 127, @@ -7266,7 +7325,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][7] = 127, [1][1][RTW89_KCC][7] = 127, [1][1][RTW89_ACMA][7] = 127, - [1][1][RTW89_CN][7] = 32, + [1][1][RTW89_CN][7] = 127, [1][1][RTW89_UK][7] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -7274,7 +7333,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 32, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][9] = 127, [1][1][RTW89_ETSI][9] = 127, @@ -7282,7 +7341,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][9] = 127, [1][1][RTW89_KCC][9] = 127, [1][1][RTW89_ACMA][9] = 127, - [1][1][RTW89_CN][9] = 32, + [1][1][RTW89_CN][9] = 127, [1][1][RTW89_UK][9] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -7290,7 +7349,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 32, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][11] = 127, [1][1][RTW89_ETSI][11] = 127, @@ -7298,7 +7357,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][11] = 127, [1][1][RTW89_KCC][11] = 127, [1][1][RTW89_ACMA][11] = 127, - [1][1][RTW89_CN][11] = 32, + 
[1][1][RTW89_CN][11] = 127, [1][1][RTW89_UK][11] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -7306,7 +7365,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 32, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, @@ -7434,7 +7493,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 44, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][1] = 127, [2][1][RTW89_ETSI][1] = 127, @@ -7442,7 +7501,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][1] = 127, [2][1][RTW89_KCC][1] = 127, [2][1][RTW89_ACMA][1] = 127, - [2][1][RTW89_CN][1] = 44, + [2][1][RTW89_CN][1] = 127, [2][1][RTW89_UK][1] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -7450,7 +7509,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 44, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][3] = 127, [2][1][RTW89_ETSI][3] = 127, @@ -7458,7 +7517,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][3] = 127, [2][1][RTW89_KCC][3] = 127, [2][1][RTW89_ACMA][3] = 127, - [2][1][RTW89_CN][3] = 44, + [2][1][RTW89_CN][3] = 127, [2][1][RTW89_UK][3] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -7466,7 +7525,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 44, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][5] = 127, [2][1][RTW89_ETSI][5] = 127, @@ -7474,7 +7533,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][5] = 127, [2][1][RTW89_KCC][5] = 127, [2][1][RTW89_ACMA][5] = 127, - [2][1][RTW89_CN][5] = 44, + [2][1][RTW89_CN][5] = 127, [2][1][RTW89_UK][5] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -7482,7 +7541,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 44, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][7] = 127, [2][1][RTW89_ETSI][7] = 127, @@ -7490,7 +7549,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][7] = 127, [2][1][RTW89_KCC][7] = 127, [2][1][RTW89_ACMA][7] = 127, - [2][1][RTW89_CN][7] = 44, + [2][1][RTW89_CN][7] = 127, [2][1][RTW89_UK][7] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -7498,7 +7557,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 44, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][9] = 127, [2][1][RTW89_ETSI][9] = 127, @@ -7506,7 +7565,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][9] = 127, [2][1][RTW89_KCC][9] = 127, [2][1][RTW89_ACMA][9] = 127, - [2][1][RTW89_CN][9] = 44, + [2][1][RTW89_CN][9] = 127, [2][1][RTW89_UK][9] = 127, [2][1][RTW89_FCC][10] = 127, 
[2][1][RTW89_ETSI][10] = 127, @@ -7514,7 +7573,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 44, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][11] = 127, [2][1][RTW89_ETSI][11] = 127, @@ -7522,7 +7581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][11] = 127, [2][1][RTW89_KCC][11] = 127, [2][1][RTW89_ACMA][11] = 127, - [2][1][RTW89_CN][11] = 44, + [2][1][RTW89_CN][11] = 127, [2][1][RTW89_UK][11] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -7530,7 +7589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 42, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, @@ -7573,14 +7632,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 42, [0][0][RTW89_WW][50] = 42, [0][0][RTW89_WW][52] = 40, - [0][1][RTW89_WW][0] = 4, - [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 4, - [0][1][RTW89_WW][6] = 4, - [0][1][RTW89_WW][8] = 4, - [0][1][RTW89_WW][10] = 4, - [0][1][RTW89_WW][12] = 4, - [0][1][RTW89_WW][14] = 4, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][12] = 0, + [0][1][RTW89_WW][14] = 0, [0][1][RTW89_WW][15] = 0, [0][1][RTW89_WW][17] = 0, [0][1][RTW89_WW][19] = 0, @@ -7593,11 +7652,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][33] = 0, [0][1][RTW89_WW][35] = 0, [0][1][RTW89_WW][37] = 0, - [0][1][RTW89_WW][38] = 42, - [0][1][RTW89_WW][40] = 42, - [0][1][RTW89_WW][42] = 42, - [0][1][RTW89_WW][44] = 42, - [0][1][RTW89_WW][46] = 42, + [0][1][RTW89_WW][38] = 0, + [0][1][RTW89_WW][40] = 0, + [0][1][RTW89_WW][42] = 0, + [0][1][RTW89_WW][44] = 0, + [0][1][RTW89_WW][46] = 0, [0][1][RTW89_WW][48] = 0, [0][1][RTW89_WW][50] = 0, [0][1][RTW89_WW][52] = 0, @@ -7629,14 +7688,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][48] = 52, [1][0][RTW89_WW][50] = 52, [1][0][RTW89_WW][52] = 52, - [1][1][RTW89_WW][0] = 14, - [1][1][RTW89_WW][2] = 14, - [1][1][RTW89_WW][4] = 14, - [1][1][RTW89_WW][6] = 14, - [1][1][RTW89_WW][8] = 14, - [1][1][RTW89_WW][10] = 14, - [1][1][RTW89_WW][12] = 14, - [1][1][RTW89_WW][14] = 14, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][12] = 0, + [1][1][RTW89_WW][14] = 0, [1][1][RTW89_WW][15] = 0, [1][1][RTW89_WW][17] = 0, [1][1][RTW89_WW][19] = 0, @@ -7649,11 +7708,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][33] = 0, [1][1][RTW89_WW][35] = 0, [1][1][RTW89_WW][37] = 0, - [1][1][RTW89_WW][38] = 54, - [1][1][RTW89_WW][40] = 54, - [1][1][RTW89_WW][42] = 54, - [1][1][RTW89_WW][44] = 54, - [1][1][RTW89_WW][46] = 54, + [1][1][RTW89_WW][38] = 0, + [1][1][RTW89_WW][40] = 0, + [1][1][RTW89_WW][42] = 0, + [1][1][RTW89_WW][44] = 0, + [1][1][RTW89_WW][46] = 0, [1][1][RTW89_WW][48] = 0, [1][1][RTW89_WW][50] = 0, [1][1][RTW89_WW][52] = 0, @@ -7685,14 +7744,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][48] = 64, 
[2][0][RTW89_WW][50] = 64, [2][0][RTW89_WW][52] = 60, - [2][1][RTW89_WW][0] = 28, - [2][1][RTW89_WW][2] = 28, - [2][1][RTW89_WW][4] = 28, - [2][1][RTW89_WW][6] = 28, - [2][1][RTW89_WW][8] = 28, - [2][1][RTW89_WW][10] = 28, - [2][1][RTW89_WW][12] = 28, - [2][1][RTW89_WW][14] = 28, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][12] = 0, + [2][1][RTW89_WW][14] = 0, [2][1][RTW89_WW][15] = 0, [2][1][RTW89_WW][17] = 0, [2][1][RTW89_WW][19] = 0, @@ -7705,11 +7764,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][33] = 0, [2][1][RTW89_WW][35] = 0, [2][1][RTW89_WW][37] = 0, - [2][1][RTW89_WW][38] = 56, - [2][1][RTW89_WW][40] = 56, - [2][1][RTW89_WW][42] = 56, - [2][1][RTW89_WW][44] = 56, - [2][1][RTW89_WW][46] = 56, + [2][1][RTW89_WW][38] = 0, + [2][1][RTW89_WW][40] = 0, + [2][1][RTW89_WW][42] = 0, + [2][1][RTW89_WW][44] = 0, + [2][1][RTW89_WW][46] = 0, [2][1][RTW89_WW][48] = 0, [2][1][RTW89_WW][50] = 0, [2][1][RTW89_WW][52] = 0, @@ -7943,7 +8002,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 4, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -7951,7 +8010,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 4, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -7959,7 +8018,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 4, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -7967,7 +8026,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 4, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -7975,7 +8034,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 4, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -7983,7 +8042,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 4, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -7991,7 +8050,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 4, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][14] = 127, [0][1][RTW89_ETSI][14] = 127, @@ -7999,7 +8058,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][14] = 127, [0][1][RTW89_KCC][14] = 127, [0][1][RTW89_ACMA][14] = 127, - [0][1][RTW89_CN][14] = 4, + [0][1][RTW89_CN][14] = 127, 
[0][1][RTW89_UK][14] = 127, [0][1][RTW89_FCC][15] = 127, [0][1][RTW89_ETSI][15] = 127, @@ -8103,7 +8162,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][38] = 127, [0][1][RTW89_KCC][38] = 127, [0][1][RTW89_ACMA][38] = 127, - [0][1][RTW89_CN][38] = 42, + [0][1][RTW89_CN][38] = 127, [0][1][RTW89_UK][38] = 127, [0][1][RTW89_FCC][40] = 127, [0][1][RTW89_ETSI][40] = 127, @@ -8111,7 +8170,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][40] = 127, [0][1][RTW89_KCC][40] = 127, [0][1][RTW89_ACMA][40] = 127, - [0][1][RTW89_CN][40] = 42, + [0][1][RTW89_CN][40] = 127, [0][1][RTW89_UK][40] = 127, [0][1][RTW89_FCC][42] = 127, [0][1][RTW89_ETSI][42] = 127, @@ -8119,7 +8178,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][42] = 127, [0][1][RTW89_KCC][42] = 127, [0][1][RTW89_ACMA][42] = 127, - [0][1][RTW89_CN][42] = 42, + [0][1][RTW89_CN][42] = 127, [0][1][RTW89_UK][42] = 127, [0][1][RTW89_FCC][44] = 127, [0][1][RTW89_ETSI][44] = 127, @@ -8127,7 +8186,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][44] = 127, [0][1][RTW89_KCC][44] = 127, [0][1][RTW89_ACMA][44] = 127, - [0][1][RTW89_CN][44] = 42, + [0][1][RTW89_CN][44] = 127, [0][1][RTW89_UK][44] = 127, [0][1][RTW89_FCC][46] = 127, [0][1][RTW89_ETSI][46] = 127, @@ -8135,7 +8194,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][46] = 127, [0][1][RTW89_KCC][46] = 127, [0][1][RTW89_ACMA][46] = 127, - [0][1][RTW89_CN][46] = 42, + [0][1][RTW89_CN][46] = 127, [0][1][RTW89_UK][46] = 127, [0][1][RTW89_FCC][48] = 127, [0][1][RTW89_ETSI][48] = 127, @@ -8391,7 +8450,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 14, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -8399,7 +8458,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 14, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -8407,7 +8466,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 14, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -8415,7 +8474,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 14, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -8423,7 +8482,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 14, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -8431,7 +8490,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 14, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][12] = 127, 
[1][1][RTW89_ETSI][12] = 127, @@ -8439,7 +8498,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 14, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][14] = 127, [1][1][RTW89_ETSI][14] = 127, @@ -8447,7 +8506,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][14] = 127, [1][1][RTW89_KCC][14] = 127, [1][1][RTW89_ACMA][14] = 127, - [1][1][RTW89_CN][14] = 14, + [1][1][RTW89_CN][14] = 127, [1][1][RTW89_UK][14] = 127, [1][1][RTW89_FCC][15] = 127, [1][1][RTW89_ETSI][15] = 127, @@ -8551,7 +8610,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][38] = 127, [1][1][RTW89_KCC][38] = 127, [1][1][RTW89_ACMA][38] = 127, - [1][1][RTW89_CN][38] = 54, + [1][1][RTW89_CN][38] = 127, [1][1][RTW89_UK][38] = 127, [1][1][RTW89_FCC][40] = 127, [1][1][RTW89_ETSI][40] = 127, @@ -8559,7 +8618,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][40] = 127, [1][1][RTW89_KCC][40] = 127, [1][1][RTW89_ACMA][40] = 127, - [1][1][RTW89_CN][40] = 54, + [1][1][RTW89_CN][40] = 127, [1][1][RTW89_UK][40] = 127, [1][1][RTW89_FCC][42] = 127, [1][1][RTW89_ETSI][42] = 127, @@ -8567,7 +8626,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][42] = 127, [1][1][RTW89_KCC][42] = 127, [1][1][RTW89_ACMA][42] = 127, - [1][1][RTW89_CN][42] = 54, + [1][1][RTW89_CN][42] = 127, [1][1][RTW89_UK][42] = 127, [1][1][RTW89_FCC][44] = 127, [1][1][RTW89_ETSI][44] = 127, @@ -8575,7 +8634,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][44] = 127, [1][1][RTW89_KCC][44] = 127, [1][1][RTW89_ACMA][44] = 127, - [1][1][RTW89_CN][44] = 54, + [1][1][RTW89_CN][44] = 127, [1][1][RTW89_UK][44] = 127, [1][1][RTW89_FCC][46] = 127, [1][1][RTW89_ETSI][46] = 127, @@ -8583,7 +8642,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][46] = 127, [1][1][RTW89_KCC][46] = 127, [1][1][RTW89_ACMA][46] = 127, - [1][1][RTW89_CN][46] = 54, + [1][1][RTW89_CN][46] = 127, [1][1][RTW89_UK][46] = 127, [1][1][RTW89_FCC][48] = 127, [1][1][RTW89_ETSI][48] = 127, @@ -8839,7 +8898,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 28, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -8847,7 +8906,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 28, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -8855,7 +8914,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 28, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -8863,7 +8922,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 28, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -8871,7 +8930,7 @@ 
const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 28, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -8879,7 +8938,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 28, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -8887,7 +8946,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 28, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][14] = 127, [2][1][RTW89_ETSI][14] = 127, @@ -8895,7 +8954,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][14] = 127, [2][1][RTW89_KCC][14] = 127, [2][1][RTW89_ACMA][14] = 127, - [2][1][RTW89_CN][14] = 28, + [2][1][RTW89_CN][14] = 127, [2][1][RTW89_UK][14] = 127, [2][1][RTW89_FCC][15] = 127, [2][1][RTW89_ETSI][15] = 127, @@ -8999,7 +9058,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][38] = 127, [2][1][RTW89_KCC][38] = 127, [2][1][RTW89_ACMA][38] = 127, - [2][1][RTW89_CN][38] = 56, + [2][1][RTW89_CN][38] = 127, [2][1][RTW89_UK][38] = 127, [2][1][RTW89_FCC][40] = 127, [2][1][RTW89_ETSI][40] = 127, @@ -9007,7 +9066,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][40] = 127, [2][1][RTW89_KCC][40] = 127, [2][1][RTW89_ACMA][40] = 127, - [2][1][RTW89_CN][40] = 56, + [2][1][RTW89_CN][40] = 127, [2][1][RTW89_UK][40] = 127, [2][1][RTW89_FCC][42] = 127, [2][1][RTW89_ETSI][42] = 127, @@ -9015,7 +9074,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][42] = 127, [2][1][RTW89_KCC][42] = 127, [2][1][RTW89_ACMA][42] = 127, - [2][1][RTW89_CN][42] = 56, + [2][1][RTW89_CN][42] = 127, [2][1][RTW89_UK][42] = 127, [2][1][RTW89_FCC][44] = 127, [2][1][RTW89_ETSI][44] = 127, @@ -9023,7 +9082,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][44] = 127, [2][1][RTW89_KCC][44] = 127, [2][1][RTW89_ACMA][44] = 127, - [2][1][RTW89_CN][44] = 56, + [2][1][RTW89_CN][44] = 127, [2][1][RTW89_UK][44] = 127, [2][1][RTW89_FCC][46] = 127, [2][1][RTW89_ETSI][46] = 127, @@ -9031,7 +9090,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][46] = 127, [2][1][RTW89_KCC][46] = 127, [2][1][RTW89_ACMA][46] = 127, - [2][1][RTW89_CN][46] = 56, + [2][1][RTW89_CN][46] = 127, [2][1][RTW89_UK][46] = 127, [2][1][RTW89_FCC][48] = 127, [2][1][RTW89_ETSI][48] = 127, @@ -9063,19 +9122,19 @@ static const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, - [0][0][0][0][RTW89_WW][12] = 52, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + 
[0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, + [0][0][0][0][RTW89_WW][12] = 42, [0][0][0][0][RTW89_WW][13] = 76, [0][1][0][0][RTW89_WW][0] = 0, [0][1][0][0][RTW89_WW][1] = 0, @@ -9093,15 +9152,15 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_WW][13] = 0, [1][0][0][0][RTW89_WW][0] = 0, [1][0][0][0][RTW89_WW][1] = 0, - [1][0][0][0][RTW89_WW][2] = 58, - [1][0][0][0][RTW89_WW][3] = 58, - [1][0][0][0][RTW89_WW][4] = 58, - [1][0][0][0][RTW89_WW][5] = 58, - [1][0][0][0][RTW89_WW][6] = 58, - [1][0][0][0][RTW89_WW][7] = 58, - [1][0][0][0][RTW89_WW][8] = 58, - [1][0][0][0][RTW89_WW][9] = 58, - [1][0][0][0][RTW89_WW][10] = 58, + [1][0][0][0][RTW89_WW][2] = 56, + [1][0][0][0][RTW89_WW][3] = 56, + [1][0][0][0][RTW89_WW][4] = 56, + [1][0][0][0][RTW89_WW][5] = 56, + [1][0][0][0][RTW89_WW][6] = 56, + [1][0][0][0][RTW89_WW][7] = 56, + [1][0][0][0][RTW89_WW][8] = 56, + [1][0][0][0][RTW89_WW][9] = 56, + [1][0][0][0][RTW89_WW][10] = 42, [1][0][0][0][RTW89_WW][11] = 0, [1][0][0][0][RTW89_WW][12] = 0, [1][0][0][0][RTW89_WW][13] = 0, @@ -9131,7 +9190,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 60, [0][0][1][0][RTW89_WW][10] = 60, [0][0][1][0][RTW89_WW][11] = 60, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][1] = 0, @@ -9147,19 +9206,19 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][11] = 0, [0][1][1][0][RTW89_WW][12] = 0, [0][1][1][0][RTW89_WW][13] = 0, - [0][0][2][0][RTW89_WW][0] = 60, - [0][0][2][0][RTW89_WW][1] = 60, - [0][0][2][0][RTW89_WW][2] = 60, - [0][0][2][0][RTW89_WW][3] = 60, - [0][0][2][0][RTW89_WW][4] = 60, - [0][0][2][0][RTW89_WW][5] = 60, - [0][0][2][0][RTW89_WW][6] = 60, - [0][0][2][0][RTW89_WW][7] = 60, - [0][0][2][0][RTW89_WW][8] = 60, - [0][0][2][0][RTW89_WW][9] = 60, - [0][0][2][0][RTW89_WW][10] = 60, - [0][0][2][0][RTW89_WW][11] = 60, - [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][0] = 58, + [0][0][2][0][RTW89_WW][1] = 58, + [0][0][2][0][RTW89_WW][2] = 58, + [0][0][2][0][RTW89_WW][3] = 58, + [0][0][2][0][RTW89_WW][4] = 58, + [0][0][2][0][RTW89_WW][5] = 58, + [0][0][2][0][RTW89_WW][6] = 58, + [0][0][2][0][RTW89_WW][7] = 58, + [0][0][2][0][RTW89_WW][8] = 58, + [0][0][2][0][RTW89_WW][9] = 58, + [0][0][2][0][RTW89_WW][10] = 58, + [0][0][2][0][RTW89_WW][11] = 58, + [0][0][2][0][RTW89_WW][12] = 38, [0][0][2][0][RTW89_WW][13] = 0, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][1] = 0, @@ -9191,15 +9250,15 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][13] = 0, [1][0][2][0][RTW89_WW][0] = 0, [1][0][2][0][RTW89_WW][1] = 0, - [1][0][2][0][RTW89_WW][2] = 58, - [1][0][2][0][RTW89_WW][3] = 58, - [1][0][2][0][RTW89_WW][4] = 58, - [1][0][2][0][RTW89_WW][5] = 58, - [1][0][2][0][RTW89_WW][6] = 58, - [1][0][2][0][RTW89_WW][7] = 58, - [1][0][2][0][RTW89_WW][8] = 58, - [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][2] = 56, + [1][0][2][0][RTW89_WW][3] = 56, + [1][0][2][0][RTW89_WW][4] = 56, + [1][0][2][0][RTW89_WW][5] = 56, + 
[1][0][2][0][RTW89_WW][6] = 56, + [1][0][2][0][RTW89_WW][7] = 56, + [1][0][2][0][RTW89_WW][8] = 56, + [1][0][2][0][RTW89_WW][9] = 56, + [1][0][2][0][RTW89_WW][10] = 48, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -9237,7 +9296,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 82, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 58, - [0][0][0][0][RTW89_CN][0] = 60, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_UK][0] = 58, [0][0][0][0][RTW89_FCC][1] = 82, [0][0][0][0][RTW89_ETSI][1] = 58, @@ -9245,7 +9304,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][1] = 82, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 58, - [0][0][0][0][RTW89_CN][1] = 60, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 82, [0][0][0][0][RTW89_ETSI][2] = 58, @@ -9253,7 +9312,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][2] = 82, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 58, - [0][0][0][0][RTW89_CN][2] = 60, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 82, [0][0][0][0][RTW89_ETSI][3] = 58, @@ -9261,7 +9320,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][3] = 82, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 58, - [0][0][0][0][RTW89_CN][3] = 60, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 82, [0][0][0][0][RTW89_ETSI][4] = 58, @@ -9269,7 +9328,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][4] = 82, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 58, - [0][0][0][0][RTW89_CN][4] = 60, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 82, [0][0][0][0][RTW89_ETSI][5] = 58, @@ -9277,7 +9336,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][5] = 82, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 58, - [0][0][0][0][RTW89_CN][5] = 60, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 82, [0][0][0][0][RTW89_ETSI][6] = 58, @@ -9285,7 +9344,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][6] = 82, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 58, - [0][0][0][0][RTW89_CN][6] = 60, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 82, [0][0][0][0][RTW89_ETSI][7] = 58, @@ -9293,7 +9352,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][7] = 82, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 58, - [0][0][0][0][RTW89_CN][7] = 60, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 82, [0][0][0][0][RTW89_ETSI][8] = 58, @@ -9301,7 +9360,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][8] = 82, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 58, - [0][0][0][0][RTW89_CN][8] = 60, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 82, [0][0][0][0][RTW89_ETSI][9] = 58, @@ -9309,7 +9368,7 @@ const s8 
rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][9] = 82, [0][0][0][0][RTW89_KCC][9] = 68, [0][0][0][0][RTW89_ACMA][9] = 58, - [0][0][0][0][RTW89_CN][9] = 60, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 80, [0][0][0][0][RTW89_ETSI][10] = 58, @@ -9317,7 +9376,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][10] = 80, [0][0][0][0][RTW89_KCC][10] = 68, [0][0][0][0][RTW89_ACMA][10] = 58, - [0][0][0][0][RTW89_CN][10] = 60, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_UK][10] = 58, [0][0][0][0][RTW89_FCC][11] = 60, [0][0][0][0][RTW89_ETSI][11] = 58, @@ -9325,7 +9384,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][11] = 60, [0][0][0][0][RTW89_KCC][11] = 68, [0][0][0][0][RTW89_ACMA][11] = 58, - [0][0][0][0][RTW89_CN][11] = 60, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 52, [0][0][0][0][RTW89_ETSI][12] = 58, @@ -9333,7 +9392,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][12] = 52, [0][0][0][0][RTW89_KCC][12] = 68, [0][0][0][0][RTW89_ACMA][12] = 58, - [0][0][0][0][RTW89_CN][12] = 60, + [0][0][0][0][RTW89_CN][12] = 42, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, @@ -9477,7 +9536,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][2] = 127, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 58, - [1][0][0][0][RTW89_CN][2] = 60, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 127, [1][0][0][0][RTW89_ETSI][3] = 58, @@ -9485,7 +9544,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][3] = 127, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 58, - [1][0][0][0][RTW89_CN][3] = 60, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 127, [1][0][0][0][RTW89_ETSI][4] = 58, @@ -9493,7 +9552,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][4] = 127, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 58, - [1][0][0][0][RTW89_CN][4] = 60, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 127, [1][0][0][0][RTW89_ETSI][5] = 58, @@ -9501,7 +9560,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][5] = 127, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 58, - [1][0][0][0][RTW89_CN][5] = 60, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 127, [1][0][0][0][RTW89_ETSI][6] = 58, @@ -9509,7 +9568,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][6] = 127, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 58, - [1][0][0][0][RTW89_CN][6] = 60, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 127, [1][0][0][0][RTW89_ETSI][7] = 58, @@ -9517,7 +9576,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][7] = 127, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 58, - [1][0][0][0][RTW89_CN][7] = 60, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_UK][7] = 58, 
[1][0][0][0][RTW89_FCC][8] = 127, [1][0][0][0][RTW89_ETSI][8] = 58, @@ -9525,7 +9584,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][8] = 127, [1][0][0][0][RTW89_KCC][8] = 68, [1][0][0][0][RTW89_ACMA][8] = 58, - [1][0][0][0][RTW89_CN][8] = 60, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 127, [1][0][0][0][RTW89_ETSI][9] = 58, @@ -9533,7 +9592,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][9] = 127, [1][0][0][0][RTW89_KCC][9] = 68, [1][0][0][0][RTW89_ACMA][9] = 58, - [1][0][0][0][RTW89_CN][9] = 60, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 127, [1][0][0][0][RTW89_ETSI][10] = 58, @@ -9541,7 +9600,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][10] = 127, [1][0][0][0][RTW89_KCC][10] = 68, [1][0][0][0][RTW89_ACMA][10] = 58, - [1][0][0][0][RTW89_CN][10] = 60, + [1][0][0][0][RTW89_CN][10] = 42, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, @@ -9781,7 +9840,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][12] = 64, [0][0][1][0][RTW89_KCC][12] = 74, [0][0][1][0][RTW89_ACMA][12] = 58, - [0][0][1][0][RTW89_CN][12] = 60, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, @@ -9909,7 +9968,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][0] = 78, [0][0][2][0][RTW89_KCC][0] = 76, [0][0][2][0][RTW89_ACMA][0] = 60, - [0][0][2][0][RTW89_CN][0] = 60, + [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_UK][0] = 60, [0][0][2][0][RTW89_FCC][1] = 78, [0][0][2][0][RTW89_ETSI][1] = 60, @@ -9917,7 +9976,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][1] = 78, [0][0][2][0][RTW89_KCC][1] = 76, [0][0][2][0][RTW89_ACMA][1] = 60, - [0][0][2][0][RTW89_CN][1] = 60, + [0][0][2][0][RTW89_CN][1] = 58, [0][0][2][0][RTW89_UK][1] = 60, [0][0][2][0][RTW89_FCC][2] = 80, [0][0][2][0][RTW89_ETSI][2] = 60, @@ -9925,7 +9984,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 80, [0][0][2][0][RTW89_KCC][2] = 76, [0][0][2][0][RTW89_ACMA][2] = 60, - [0][0][2][0][RTW89_CN][2] = 60, + [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_UK][2] = 60, [0][0][2][0][RTW89_FCC][3] = 80, [0][0][2][0][RTW89_ETSI][3] = 60, @@ -9933,7 +9992,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][3] = 80, [0][0][2][0][RTW89_KCC][3] = 76, [0][0][2][0][RTW89_ACMA][3] = 60, - [0][0][2][0][RTW89_CN][3] = 60, + [0][0][2][0][RTW89_CN][3] = 58, [0][0][2][0][RTW89_UK][3] = 60, [0][0][2][0][RTW89_FCC][4] = 80, [0][0][2][0][RTW89_ETSI][4] = 60, @@ -9941,7 +10000,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 80, [0][0][2][0][RTW89_KCC][4] = 76, [0][0][2][0][RTW89_ACMA][4] = 60, - [0][0][2][0][RTW89_CN][4] = 60, + [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_UK][4] = 60, [0][0][2][0][RTW89_FCC][5] = 80, [0][0][2][0][RTW89_ETSI][5] = 60, @@ -9949,7 +10008,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][5] = 80, [0][0][2][0][RTW89_KCC][5] = 76, [0][0][2][0][RTW89_ACMA][5] = 60, - 
[0][0][2][0][RTW89_CN][5] = 60, + [0][0][2][0][RTW89_CN][5] = 58, [0][0][2][0][RTW89_UK][5] = 60, [0][0][2][0][RTW89_FCC][6] = 80, [0][0][2][0][RTW89_ETSI][6] = 60, @@ -9957,7 +10016,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 80, [0][0][2][0][RTW89_KCC][6] = 76, [0][0][2][0][RTW89_ACMA][6] = 60, - [0][0][2][0][RTW89_CN][6] = 60, + [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_UK][6] = 60, [0][0][2][0][RTW89_FCC][7] = 80, [0][0][2][0][RTW89_ETSI][7] = 60, @@ -9965,7 +10024,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][7] = 80, [0][0][2][0][RTW89_KCC][7] = 76, [0][0][2][0][RTW89_ACMA][7] = 60, - [0][0][2][0][RTW89_CN][7] = 60, + [0][0][2][0][RTW89_CN][7] = 58, [0][0][2][0][RTW89_UK][7] = 60, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 60, @@ -9973,7 +10032,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 78, [0][0][2][0][RTW89_KCC][8] = 76, [0][0][2][0][RTW89_ACMA][8] = 60, - [0][0][2][0][RTW89_CN][8] = 60, + [0][0][2][0][RTW89_CN][8] = 58, [0][0][2][0][RTW89_UK][8] = 60, [0][0][2][0][RTW89_FCC][9] = 74, [0][0][2][0][RTW89_ETSI][9] = 60, @@ -9981,7 +10040,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][9] = 74, [0][0][2][0][RTW89_KCC][9] = 76, [0][0][2][0][RTW89_ACMA][9] = 60, - [0][0][2][0][RTW89_CN][9] = 60, + [0][0][2][0][RTW89_CN][9] = 58, [0][0][2][0][RTW89_UK][9] = 60, [0][0][2][0][RTW89_FCC][10] = 74, [0][0][2][0][RTW89_ETSI][10] = 60, @@ -9989,7 +10048,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 74, [0][0][2][0][RTW89_KCC][10] = 76, [0][0][2][0][RTW89_ACMA][10] = 60, - [0][0][2][0][RTW89_CN][10] = 60, + [0][0][2][0][RTW89_CN][10] = 58, [0][0][2][0][RTW89_UK][10] = 60, [0][0][2][0][RTW89_FCC][11] = 68, [0][0][2][0][RTW89_ETSI][11] = 60, @@ -9997,7 +10056,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][11] = 68, [0][0][2][0][RTW89_KCC][11] = 76, [0][0][2][0][RTW89_ACMA][11] = 60, - [0][0][2][0][RTW89_CN][11] = 60, + [0][0][2][0][RTW89_CN][11] = 58, [0][0][2][0][RTW89_UK][11] = 60, [0][0][2][0][RTW89_FCC][12] = 68, [0][0][2][0][RTW89_ETSI][12] = 60, @@ -10005,7 +10064,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 68, [0][0][2][0][RTW89_KCC][12] = 76, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 60, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, @@ -10261,7 +10320,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][2] = 70, [1][0][2][0][RTW89_KCC][2] = 76, [1][0][2][0][RTW89_ACMA][2] = 58, - [1][0][2][0][RTW89_CN][2] = 60, + [1][0][2][0][RTW89_CN][2] = 56, [1][0][2][0][RTW89_UK][2] = 58, [1][0][2][0][RTW89_FCC][3] = 70, [1][0][2][0][RTW89_ETSI][3] = 58, @@ -10269,7 +10328,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][3] = 70, [1][0][2][0][RTW89_KCC][3] = 76, [1][0][2][0][RTW89_ACMA][3] = 58, - [1][0][2][0][RTW89_CN][3] = 60, + [1][0][2][0][RTW89_CN][3] = 56, [1][0][2][0][RTW89_UK][3] = 58, [1][0][2][0][RTW89_FCC][4] = 74, [1][0][2][0][RTW89_ETSI][4] = 58, @@ -10277,7 +10336,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[1][0][2][0][RTW89_IC][4] = 74, [1][0][2][0][RTW89_KCC][4] = 76, [1][0][2][0][RTW89_ACMA][4] = 58, - [1][0][2][0][RTW89_CN][4] = 60, + [1][0][2][0][RTW89_CN][4] = 56, [1][0][2][0][RTW89_UK][4] = 58, [1][0][2][0][RTW89_FCC][5] = 76, [1][0][2][0][RTW89_ETSI][5] = 58, @@ -10285,7 +10344,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 76, [1][0][2][0][RTW89_KCC][5] = 76, [1][0][2][0][RTW89_ACMA][5] = 58, - [1][0][2][0][RTW89_CN][5] = 60, + [1][0][2][0][RTW89_CN][5] = 56, [1][0][2][0][RTW89_UK][5] = 58, [1][0][2][0][RTW89_FCC][6] = 76, [1][0][2][0][RTW89_ETSI][6] = 58, @@ -10293,7 +10352,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][6] = 76, [1][0][2][0][RTW89_KCC][6] = 76, [1][0][2][0][RTW89_ACMA][6] = 58, - [1][0][2][0][RTW89_CN][6] = 60, + [1][0][2][0][RTW89_CN][6] = 56, [1][0][2][0][RTW89_UK][6] = 58, [1][0][2][0][RTW89_FCC][7] = 76, [1][0][2][0][RTW89_ETSI][7] = 58, @@ -10301,7 +10360,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][7] = 76, [1][0][2][0][RTW89_KCC][7] = 76, [1][0][2][0][RTW89_ACMA][7] = 58, - [1][0][2][0][RTW89_CN][7] = 60, + [1][0][2][0][RTW89_CN][7] = 56, [1][0][2][0][RTW89_UK][7] = 58, [1][0][2][0][RTW89_FCC][8] = 78, [1][0][2][0][RTW89_ETSI][8] = 58, @@ -10309,7 +10368,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][8] = 78, [1][0][2][0][RTW89_KCC][8] = 76, [1][0][2][0][RTW89_ACMA][8] = 58, - [1][0][2][0][RTW89_CN][8] = 60, + [1][0][2][0][RTW89_CN][8] = 56, [1][0][2][0][RTW89_UK][8] = 58, [1][0][2][0][RTW89_FCC][9] = 74, [1][0][2][0][RTW89_ETSI][9] = 58, @@ -10317,7 +10376,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 74, [1][0][2][0][RTW89_KCC][9] = 76, [1][0][2][0][RTW89_ACMA][9] = 58, - [1][0][2][0][RTW89_CN][9] = 60, + [1][0][2][0][RTW89_CN][9] = 56, [1][0][2][0][RTW89_UK][9] = 58, [1][0][2][0][RTW89_FCC][10] = 68, [1][0][2][0][RTW89_ETSI][10] = 58, @@ -10325,7 +10384,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][10] = 68, [1][0][2][0][RTW89_KCC][10] = 76, [1][0][2][0][RTW89_ACMA][10] = 58, - [1][0][2][0][RTW89_CN][10] = 60, + [1][0][2][0][RTW89_CN][10] = 48, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, @@ -10606,9 +10665,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][42] = 30, [0][0][1][0][RTW89_WW][44] = 30, [0][0][1][0][RTW89_WW][46] = 30, - [0][0][1][0][RTW89_WW][48] = 68, - [0][0][1][0][RTW89_WW][50] = 68, - [0][0][1][0][RTW89_WW][52] = 68, + [0][0][1][0][RTW89_WW][48] = 72, + [0][0][1][0][RTW89_WW][50] = 72, + [0][0][1][0][RTW89_WW][52] = 72, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][2] = 0, [0][1][1][0][RTW89_WW][4] = 0, @@ -10637,14 +10696,14 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][48] = 0, [0][1][1][0][RTW89_WW][50] = 0, [0][1][1][0][RTW89_WW][52] = 0, - [0][0][2][0][RTW89_WW][0] = 62, - [0][0][2][0][RTW89_WW][2] = 62, - [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][4] = 60, [0][0][2][0][RTW89_WW][6] = 54, - [0][0][2][0][RTW89_WW][8] = 62, - [0][0][2][0][RTW89_WW][10] = 62, - [0][0][2][0][RTW89_WW][12] = 62, - [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][8] = 60, + 
[0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][14] = 60, [0][0][2][0][RTW89_WW][15] = 60, [0][0][2][0][RTW89_WW][17] = 62, [0][0][2][0][RTW89_WW][19] = 62, @@ -10662,9 +10721,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][42] = 30, [0][0][2][0][RTW89_WW][44] = 30, [0][0][2][0][RTW89_WW][46] = 30, - [0][0][2][0][RTW89_WW][48] = 70, - [0][0][2][0][RTW89_WW][50] = 70, - [0][0][2][0][RTW89_WW][52] = 70, + [0][0][2][0][RTW89_WW][48] = 74, + [0][0][2][0][RTW89_WW][50] = 74, + [0][0][2][0][RTW89_WW][52] = 74, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][2] = 0, [0][1][2][0][RTW89_WW][4] = 0, @@ -10721,11 +10780,11 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][48] = 0, [0][1][2][1][RTW89_WW][50] = 0, [0][1][2][1][RTW89_WW][52] = 0, - [1][0][2][0][RTW89_WW][1] = 60, + [1][0][2][0][RTW89_WW][1] = 64, [1][0][2][0][RTW89_WW][5] = 62, - [1][0][2][0][RTW89_WW][9] = 64, - [1][0][2][0][RTW89_WW][13] = 60, - [1][0][2][0][RTW89_WW][16] = 62, + [1][0][2][0][RTW89_WW][9] = 58, + [1][0][2][0][RTW89_WW][13] = 58, + [1][0][2][0][RTW89_WW][16] = 66, [1][0][2][0][RTW89_WW][20] = 66, [1][0][2][0][RTW89_WW][24] = 66, [1][0][2][0][RTW89_WW][28] = 66, @@ -10733,8 +10792,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][36] = 76, [1][0][2][0][RTW89_WW][39] = 30, [1][0][2][0][RTW89_WW][43] = 30, - [1][0][2][0][RTW89_WW][47] = 76, - [1][0][2][0][RTW89_WW][51] = 76, + [1][0][2][0][RTW89_WW][47] = 80, + [1][0][2][0][RTW89_WW][51] = 80, [1][1][2][0][RTW89_WW][1] = 0, [1][1][2][0][RTW89_WW][5] = 0, [1][1][2][0][RTW89_WW][9] = 0, @@ -10764,12 +10823,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][47] = 0, [1][1][2][1][RTW89_WW][51] = 0, [2][0][2][0][RTW89_WW][3] = 60, - [2][0][2][0][RTW89_WW][11] = 58, - [2][0][2][0][RTW89_WW][18] = 62, + [2][0][2][0][RTW89_WW][11] = 54, + [2][0][2][0][RTW89_WW][18] = 64, [2][0][2][0][RTW89_WW][26] = 64, [2][0][2][0][RTW89_WW][34] = 68, [2][0][2][0][RTW89_WW][41] = 30, - [2][0][2][0][RTW89_WW][49] = 68, + [2][0][2][0][RTW89_WW][49] = 72, [2][1][2][0][RTW89_WW][3] = 0, [2][1][2][0][RTW89_WW][11] = 0, [2][1][2][0][RTW89_WW][18] = 0, @@ -10784,8 +10843,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_WW][34] = 0, [2][1][2][1][RTW89_WW][41] = 0, [2][1][2][1][RTW89_WW][49] = 0, - [3][0][2][0][RTW89_WW][7] = 58, - [3][0][2][0][RTW89_WW][22] = 58, + [3][0][2][0][RTW89_WW][7] = 0, + [3][0][2][0][RTW89_WW][22] = 0, [3][0][2][0][RTW89_WW][45] = 0, [3][1][2][0][RTW89_WW][7] = 0, [3][1][2][0][RTW89_WW][22] = 0, @@ -10793,7 +10852,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_WW][7] = 0, [3][1][2][1][RTW89_WW][22] = 0, [3][1][2][1][RTW89_WW][45] = 0, - [0][0][1][0][RTW89_FCC][0] = 74, + [0][0][1][0][RTW89_FCC][0] = 78, [0][0][1][0][RTW89_ETSI][0] = 58, [0][0][1][0][RTW89_MKK][0] = 60, [0][0][1][0][RTW89_IC][0] = 62, @@ -10849,7 +10908,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][12] = 58, [0][0][1][0][RTW89_CN][12] = 60, [0][0][1][0][RTW89_UK][12] = 58, - [0][0][1][0][RTW89_FCC][14] = 72, + [0][0][1][0][RTW89_FCC][14] = 76, [0][0][1][0][RTW89_ETSI][14] = 58, [0][0][1][0][RTW89_MKK][14] = 60, [0][0][1][0][RTW89_IC][14] = 62, @@ -10857,10 +10916,10 @@ const s8 
rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][14] = 58, [0][0][1][0][RTW89_CN][14] = 60, [0][0][1][0][RTW89_UK][14] = 58, - [0][0][1][0][RTW89_FCC][15] = 72, + [0][0][1][0][RTW89_FCC][15] = 76, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 74, - [0][0][1][0][RTW89_IC][15] = 72, + [0][0][1][0][RTW89_IC][15] = 76, [0][0][1][0][RTW89_KCC][15] = 74, [0][0][1][0][RTW89_ACMA][15] = 58, [0][0][1][0][RTW89_CN][15] = 127, @@ -10937,10 +10996,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][33] = 60, [0][0][1][0][RTW89_CN][33] = 127, [0][0][1][0][RTW89_UK][33] = 60, - [0][0][1][0][RTW89_FCC][35] = 66, + [0][0][1][0][RTW89_FCC][35] = 70, [0][0][1][0][RTW89_ETSI][35] = 60, [0][0][1][0][RTW89_MKK][35] = 74, - [0][0][1][0][RTW89_IC][35] = 66, + [0][0][1][0][RTW89_IC][35] = 70, [0][0][1][0][RTW89_KCC][35] = 74, [0][0][1][0][RTW89_ACMA][35] = 60, [0][0][1][0][RTW89_CN][35] = 127, @@ -10959,7 +11018,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][38] = 78, [0][0][1][0][RTW89_KCC][38] = 70, [0][0][1][0][RTW89_ACMA][38] = 74, - [0][0][1][0][RTW89_CN][38] = 74, + [0][0][1][0][RTW89_CN][38] = 64, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 78, [0][0][1][0][RTW89_ETSI][40] = 30, @@ -10967,7 +11026,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][40] = 78, [0][0][1][0][RTW89_KCC][40] = 74, [0][0][1][0][RTW89_ACMA][40] = 74, - [0][0][1][0][RTW89_CN][40] = 74, + [0][0][1][0][RTW89_CN][40] = 64, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 78, [0][0][1][0][RTW89_ETSI][42] = 30, @@ -10975,7 +11034,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][42] = 78, [0][0][1][0][RTW89_KCC][42] = 74, [0][0][1][0][RTW89_ACMA][42] = 74, - [0][0][1][0][RTW89_CN][42] = 74, + [0][0][1][0][RTW89_CN][42] = 64, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 78, [0][0][1][0][RTW89_ETSI][44] = 30, @@ -10983,7 +11042,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][44] = 78, [0][0][1][0][RTW89_KCC][44] = 74, [0][0][1][0][RTW89_ACMA][44] = 74, - [0][0][1][0][RTW89_CN][44] = 74, + [0][0][1][0][RTW89_CN][44] = 62, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 78, [0][0][1][0][RTW89_ETSI][46] = 30, @@ -10991,9 +11050,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][46] = 78, [0][0][1][0][RTW89_KCC][46] = 74, [0][0][1][0][RTW89_ACMA][46] = 74, - [0][0][1][0][RTW89_CN][46] = 74, + [0][0][1][0][RTW89_CN][46] = 62, [0][0][1][0][RTW89_UK][46] = 58, - [0][0][1][0][RTW89_FCC][48] = 68, + [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, [0][0][1][0][RTW89_IC][48] = 127, @@ -11001,7 +11060,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][48] = 127, [0][0][1][0][RTW89_CN][48] = 127, [0][0][1][0][RTW89_UK][48] = 127, - [0][0][1][0][RTW89_FCC][50] = 68, + [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, [0][0][1][0][RTW89_IC][50] = 127, @@ -11009,7 +11068,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][50] = 127, [0][0][1][0][RTW89_CN][50] = 127, [0][0][1][0][RTW89_UK][50] = 127, - 
[0][0][1][0][RTW89_FCC][52] = 68, + [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, [0][0][1][0][RTW89_IC][52] = 127, @@ -11241,13 +11300,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_ACMA][52] = 127, [0][1][1][0][RTW89_CN][52] = 127, [0][1][1][0][RTW89_UK][52] = 127, - [0][0][2][0][RTW89_FCC][0] = 72, + [0][0][2][0][RTW89_FCC][0] = 76, [0][0][2][0][RTW89_ETSI][0] = 62, [0][0][2][0][RTW89_MKK][0] = 62, [0][0][2][0][RTW89_IC][0] = 64, [0][0][2][0][RTW89_KCC][0] = 74, [0][0][2][0][RTW89_ACMA][0] = 62, - [0][0][2][0][RTW89_CN][0] = 62, + [0][0][2][0][RTW89_CN][0] = 60, [0][0][2][0][RTW89_UK][0] = 62, [0][0][2][0][RTW89_FCC][2] = 78, [0][0][2][0][RTW89_ETSI][2] = 62, @@ -11255,7 +11314,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 64, [0][0][2][0][RTW89_KCC][2] = 74, [0][0][2][0][RTW89_ACMA][2] = 62, - [0][0][2][0][RTW89_CN][2] = 62, + [0][0][2][0][RTW89_CN][2] = 60, [0][0][2][0][RTW89_UK][2] = 62, [0][0][2][0][RTW89_FCC][4] = 78, [0][0][2][0][RTW89_ETSI][4] = 62, @@ -11263,7 +11322,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 64, [0][0][2][0][RTW89_KCC][4] = 74, [0][0][2][0][RTW89_ACMA][4] = 62, - [0][0][2][0][RTW89_CN][4] = 62, + [0][0][2][0][RTW89_CN][4] = 60, [0][0][2][0][RTW89_UK][4] = 62, [0][0][2][0][RTW89_FCC][6] = 78, [0][0][2][0][RTW89_ETSI][6] = 62, @@ -11271,7 +11330,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 64, [0][0][2][0][RTW89_KCC][6] = 54, [0][0][2][0][RTW89_ACMA][6] = 62, - [0][0][2][0][RTW89_CN][6] = 62, + [0][0][2][0][RTW89_CN][6] = 60, [0][0][2][0][RTW89_UK][6] = 62, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 62, @@ -11279,7 +11338,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 64, [0][0][2][0][RTW89_KCC][8] = 74, [0][0][2][0][RTW89_ACMA][8] = 62, - [0][0][2][0][RTW89_CN][8] = 62, + [0][0][2][0][RTW89_CN][8] = 60, [0][0][2][0][RTW89_UK][8] = 62, [0][0][2][0][RTW89_FCC][10] = 78, [0][0][2][0][RTW89_ETSI][10] = 62, @@ -11287,7 +11346,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 64, [0][0][2][0][RTW89_KCC][10] = 74, [0][0][2][0][RTW89_ACMA][10] = 62, - [0][0][2][0][RTW89_CN][10] = 62, + [0][0][2][0][RTW89_CN][10] = 60, [0][0][2][0][RTW89_UK][10] = 62, [0][0][2][0][RTW89_FCC][12] = 78, [0][0][2][0][RTW89_ETSI][12] = 62, @@ -11295,20 +11354,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 64, [0][0][2][0][RTW89_KCC][12] = 74, [0][0][2][0][RTW89_ACMA][12] = 62, - [0][0][2][0][RTW89_CN][12] = 62, + [0][0][2][0][RTW89_CN][12] = 60, [0][0][2][0][RTW89_UK][12] = 62, - [0][0][2][0][RTW89_FCC][14] = 70, + [0][0][2][0][RTW89_FCC][14] = 74, [0][0][2][0][RTW89_ETSI][14] = 62, [0][0][2][0][RTW89_MKK][14] = 62, [0][0][2][0][RTW89_IC][14] = 64, [0][0][2][0][RTW89_KCC][14] = 74, [0][0][2][0][RTW89_ACMA][14] = 62, - [0][0][2][0][RTW89_CN][14] = 62, + [0][0][2][0][RTW89_CN][14] = 60, [0][0][2][0][RTW89_UK][14] = 62, - [0][0][2][0][RTW89_FCC][15] = 70, + [0][0][2][0][RTW89_FCC][15] = 74, [0][0][2][0][RTW89_ETSI][15] = 60, [0][0][2][0][RTW89_MKK][15] = 74, - [0][0][2][0][RTW89_IC][15] = 70, + [0][0][2][0][RTW89_IC][15] = 74, [0][0][2][0][RTW89_KCC][15] = 74, [0][0][2][0][RTW89_ACMA][15] = 60, 
[0][0][2][0][RTW89_CN][15] = 127, @@ -11385,10 +11444,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][33] = 62, [0][0][2][0][RTW89_CN][33] = 127, [0][0][2][0][RTW89_UK][33] = 62, - [0][0][2][0][RTW89_FCC][35] = 68, + [0][0][2][0][RTW89_FCC][35] = 72, [0][0][2][0][RTW89_ETSI][35] = 62, [0][0][2][0][RTW89_MKK][35] = 74, - [0][0][2][0][RTW89_IC][35] = 68, + [0][0][2][0][RTW89_IC][35] = 72, [0][0][2][0][RTW89_KCC][35] = 74, [0][0][2][0][RTW89_ACMA][35] = 62, [0][0][2][0][RTW89_CN][35] = 127, @@ -11407,7 +11466,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][38] = 78, [0][0][2][0][RTW89_KCC][38] = 66, [0][0][2][0][RTW89_ACMA][38] = 74, - [0][0][2][0][RTW89_CN][38] = 74, + [0][0][2][0][RTW89_CN][38] = 66, [0][0][2][0][RTW89_UK][38] = 60, [0][0][2][0][RTW89_FCC][40] = 78, [0][0][2][0][RTW89_ETSI][40] = 30, @@ -11415,7 +11474,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][40] = 78, [0][0][2][0][RTW89_KCC][40] = 74, [0][0][2][0][RTW89_ACMA][40] = 74, - [0][0][2][0][RTW89_CN][40] = 74, + [0][0][2][0][RTW89_CN][40] = 66, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 78, [0][0][2][0][RTW89_ETSI][42] = 30, @@ -11423,7 +11482,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][42] = 78, [0][0][2][0][RTW89_KCC][42] = 74, [0][0][2][0][RTW89_ACMA][42] = 74, - [0][0][2][0][RTW89_CN][42] = 74, + [0][0][2][0][RTW89_CN][42] = 66, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 78, [0][0][2][0][RTW89_ETSI][44] = 30, @@ -11431,7 +11490,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][44] = 78, [0][0][2][0][RTW89_KCC][44] = 74, [0][0][2][0][RTW89_ACMA][44] = 74, - [0][0][2][0][RTW89_CN][44] = 74, + [0][0][2][0][RTW89_CN][44] = 64, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 78, [0][0][2][0][RTW89_ETSI][46] = 30, @@ -11439,9 +11498,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][46] = 78, [0][0][2][0][RTW89_KCC][46] = 74, [0][0][2][0][RTW89_ACMA][46] = 74, - [0][0][2][0][RTW89_CN][46] = 74, + [0][0][2][0][RTW89_CN][46] = 64, [0][0][2][0][RTW89_UK][46] = 60, - [0][0][2][0][RTW89_FCC][48] = 70, + [0][0][2][0][RTW89_FCC][48] = 74, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, [0][0][2][0][RTW89_IC][48] = 127, @@ -11449,7 +11508,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][48] = 127, [0][0][2][0][RTW89_CN][48] = 127, [0][0][2][0][RTW89_UK][48] = 127, - [0][0][2][0][RTW89_FCC][50] = 70, + [0][0][2][0][RTW89_FCC][50] = 74, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, [0][0][2][0][RTW89_IC][50] = 127, @@ -11457,7 +11516,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][50] = 127, [0][0][2][0][RTW89_CN][50] = 127, [0][0][2][0][RTW89_UK][50] = 127, - [0][0][2][0][RTW89_FCC][52] = 70, + [0][0][2][0][RTW89_FCC][52] = 74, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, [0][0][2][0][RTW89_IC][52] = 127, @@ -11913,13 +11972,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_ACMA][52] = 127, [0][1][2][1][RTW89_CN][52] = 127, [0][1][2][1][RTW89_UK][52] = 127, - [1][0][2][0][RTW89_FCC][1] = 62, + [1][0][2][0][RTW89_FCC][1] = 66, 
[1][0][2][0][RTW89_ETSI][1] = 64, [1][0][2][0][RTW89_MKK][1] = 64, - [1][0][2][0][RTW89_IC][1] = 60, + [1][0][2][0][RTW89_IC][1] = 64, [1][0][2][0][RTW89_KCC][1] = 74, [1][0][2][0][RTW89_ACMA][1] = 64, - [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_CN][1] = 66, [1][0][2][0][RTW89_UK][1] = 64, [1][0][2][0][RTW89_FCC][5] = 80, [1][0][2][0][RTW89_ETSI][5] = 64, @@ -11927,7 +11986,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 64, [1][0][2][0][RTW89_KCC][5] = 66, [1][0][2][0][RTW89_ACMA][5] = 64, - [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_CN][5] = 66, [1][0][2][0][RTW89_UK][5] = 64, [1][0][2][0][RTW89_FCC][9] = 80, [1][0][2][0][RTW89_ETSI][9] = 64, @@ -11935,20 +11994,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 64, [1][0][2][0][RTW89_KCC][9] = 76, [1][0][2][0][RTW89_ACMA][9] = 64, - [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_CN][9] = 58, [1][0][2][0][RTW89_UK][9] = 64, - [1][0][2][0][RTW89_FCC][13] = 60, + [1][0][2][0][RTW89_FCC][13] = 64, [1][0][2][0][RTW89_ETSI][13] = 64, [1][0][2][0][RTW89_MKK][13] = 64, - [1][0][2][0][RTW89_IC][13] = 60, + [1][0][2][0][RTW89_IC][13] = 64, [1][0][2][0][RTW89_KCC][13] = 72, [1][0][2][0][RTW89_ACMA][13] = 64, - [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_CN][13] = 58, [1][0][2][0][RTW89_UK][13] = 64, - [1][0][2][0][RTW89_FCC][16] = 62, + [1][0][2][0][RTW89_FCC][16] = 66, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 76, - [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_IC][16] = 66, [1][0][2][0][RTW89_KCC][16] = 74, [1][0][2][0][RTW89_ACMA][16] = 66, [1][0][2][0][RTW89_CN][16] = 127, @@ -11956,7 +12015,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][20] = 80, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 76, - [1][0][2][0][RTW89_IC][20] = 76, + [1][0][2][0][RTW89_IC][20] = 80, [1][0][2][0][RTW89_KCC][20] = 74, [1][0][2][0][RTW89_ACMA][20] = 66, [1][0][2][0][RTW89_CN][20] = 127, @@ -11977,10 +12036,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][28] = 127, [1][0][2][0][RTW89_CN][28] = 127, [1][0][2][0][RTW89_UK][28] = 66, - [1][0][2][0][RTW89_FCC][32] = 70, + [1][0][2][0][RTW89_FCC][32] = 74, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 76, - [1][0][2][0][RTW89_IC][32] = 70, + [1][0][2][0][RTW89_IC][32] = 74, [1][0][2][0][RTW89_KCC][32] = 76, [1][0][2][0][RTW89_ACMA][32] = 66, [1][0][2][0][RTW89_CN][32] = 127, @@ -11996,10 +12055,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][39] = 80, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, - [1][0][2][0][RTW89_IC][39] = 76, + [1][0][2][0][RTW89_IC][39] = 80, [1][0][2][0][RTW89_KCC][39] = 68, [1][0][2][0][RTW89_ACMA][39] = 76, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 56, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 80, [1][0][2][0][RTW89_ETSI][43] = 30, @@ -12007,9 +12066,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][43] = 80, [1][0][2][0][RTW89_KCC][43] = 76, [1][0][2][0][RTW89_ACMA][43] = 76, - [1][0][2][0][RTW89_CN][43] = 76, + [1][0][2][0][RTW89_CN][43] = 64, [1][0][2][0][RTW89_UK][43] = 64, - [1][0][2][0][RTW89_FCC][47] = 76, + [1][0][2][0][RTW89_FCC][47] = 80, [1][0][2][0][RTW89_ETSI][47] = 127, 
[1][0][2][0][RTW89_MKK][47] = 127, [1][0][2][0][RTW89_IC][47] = 127, @@ -12017,7 +12076,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][47] = 127, [1][0][2][0][RTW89_CN][47] = 127, [1][0][2][0][RTW89_UK][47] = 127, - [1][0][2][0][RTW89_FCC][51] = 76, + [1][0][2][0][RTW89_FCC][51] = 80, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, [1][0][2][0][RTW89_IC][51] = 127, @@ -12249,26 +12308,26 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_ACMA][51] = 127, [1][1][2][1][RTW89_CN][51] = 127, [1][1][2][1][RTW89_UK][51] = 127, - [2][0][2][0][RTW89_FCC][3] = 68, + [2][0][2][0][RTW89_FCC][3] = 72, [2][0][2][0][RTW89_ETSI][3] = 64, [2][0][2][0][RTW89_MKK][3] = 62, - [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_IC][3] = 64, [2][0][2][0][RTW89_KCC][3] = 68, [2][0][2][0][RTW89_ACMA][3] = 64, - [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_CN][3] = 60, [2][0][2][0][RTW89_UK][3] = 64, - [2][0][2][0][RTW89_FCC][11] = 58, + [2][0][2][0][RTW89_FCC][11] = 62, [2][0][2][0][RTW89_ETSI][11] = 64, [2][0][2][0][RTW89_MKK][11] = 64, - [2][0][2][0][RTW89_IC][11] = 58, + [2][0][2][0][RTW89_IC][11] = 62, [2][0][2][0][RTW89_KCC][11] = 68, [2][0][2][0][RTW89_ACMA][11] = 64, - [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_CN][11] = 54, [2][0][2][0][RTW89_UK][11] = 64, - [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_FCC][18] = 66, [2][0][2][0][RTW89_ETSI][18] = 64, [2][0][2][0][RTW89_MKK][18] = 68, - [2][0][2][0][RTW89_IC][18] = 62, + [2][0][2][0][RTW89_IC][18] = 66, [2][0][2][0][RTW89_KCC][18] = 68, [2][0][2][0][RTW89_ACMA][18] = 64, [2][0][2][0][RTW89_CN][18] = 127, @@ -12284,7 +12343,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][34] = 72, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 68, - [2][0][2][0][RTW89_IC][34] = 68, + [2][0][2][0][RTW89_IC][34] = 72, [2][0][2][0][RTW89_KCC][34] = 68, [2][0][2][0][RTW89_ACMA][34] = 68, [2][0][2][0][RTW89_CN][34] = 127, @@ -12292,12 +12351,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][41] = 72, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, - [2][0][2][0][RTW89_IC][41] = 68, + [2][0][2][0][RTW89_IC][41] = 72, [2][0][2][0][RTW89_KCC][41] = 64, [2][0][2][0][RTW89_ACMA][41] = 68, - [2][0][2][0][RTW89_CN][41] = 68, + [2][0][2][0][RTW89_CN][41] = 38, [2][0][2][0][RTW89_UK][41] = 64, - [2][0][2][0][RTW89_FCC][49] = 68, + [2][0][2][0][RTW89_FCC][49] = 72, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, [2][0][2][0][RTW89_IC][49] = 127, @@ -12423,7 +12482,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][7] = 127, [3][0][2][0][RTW89_KCC][7] = 127, [3][0][2][0][RTW89_ACMA][7] = 127, - [3][0][2][0][RTW89_CN][7] = 58, + [3][0][2][0][RTW89_CN][7] = 127, [3][0][2][0][RTW89_UK][7] = 127, [3][0][2][0][RTW89_FCC][22] = 127, [3][0][2][0][RTW89_ETSI][22] = 127, @@ -12431,7 +12490,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 127, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 58, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_FCC][45] = 127, [3][0][2][0][RTW89_ETSI][45] = 127, @@ -12508,19 +12567,19 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][11] = 30, [0][0][RTW89_WW][12] = 30, [0][0][RTW89_WW][13] = 0, - [0][1][RTW89_WW][0] = 20, - [0][1][RTW89_WW][1] = 22, - [0][1][RTW89_WW][2] = 22, - [0][1][RTW89_WW][3] = 22, - [0][1][RTW89_WW][4] = 22, - [0][1][RTW89_WW][5] = 22, - [0][1][RTW89_WW][6] = 22, - [0][1][RTW89_WW][7] = 22, - [0][1][RTW89_WW][8] = 22, - [0][1][RTW89_WW][9] = 22, - [0][1][RTW89_WW][10] = 22, - [0][1][RTW89_WW][11] = 22, - [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][1] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][3] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][5] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][7] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][9] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][11] = 0, + [0][1][RTW89_WW][12] = 0, [0][1][RTW89_WW][13] = 0, [1][0][RTW89_WW][0] = 42, [1][0][RTW89_WW][1] = 42, @@ -12536,19 +12595,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][11] = 42, [1][0][RTW89_WW][12] = 34, [1][0][RTW89_WW][13] = 0, - [1][1][RTW89_WW][0] = 32, - [1][1][RTW89_WW][1] = 32, - [1][1][RTW89_WW][2] = 32, - [1][1][RTW89_WW][3] = 32, - [1][1][RTW89_WW][4] = 32, - [1][1][RTW89_WW][5] = 32, - [1][1][RTW89_WW][6] = 32, - [1][1][RTW89_WW][7] = 32, - [1][1][RTW89_WW][8] = 32, - [1][1][RTW89_WW][9] = 32, - [1][1][RTW89_WW][10] = 32, - [1][1][RTW89_WW][11] = 32, - [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][1] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][3] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][5] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][7] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][9] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][11] = 0, + [1][1][RTW89_WW][12] = 0, [1][1][RTW89_WW][13] = 0, [2][0][RTW89_WW][0] = 54, [2][0][RTW89_WW][1] = 54, @@ -12564,19 +12623,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][11] = 54, [2][0][RTW89_WW][12] = 34, [2][0][RTW89_WW][13] = 0, - [2][1][RTW89_WW][0] = 44, - [2][1][RTW89_WW][1] = 44, - [2][1][RTW89_WW][2] = 44, - [2][1][RTW89_WW][3] = 44, - [2][1][RTW89_WW][4] = 44, - [2][1][RTW89_WW][5] = 44, - [2][1][RTW89_WW][6] = 44, - [2][1][RTW89_WW][7] = 44, - [2][1][RTW89_WW][8] = 44, - [2][1][RTW89_WW][9] = 44, - [2][1][RTW89_WW][10] = 44, - [2][1][RTW89_WW][11] = 44, - [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][1] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][3] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][5] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][7] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][9] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][11] = 0, + [2][1][RTW89_WW][12] = 0, [2][1][RTW89_WW][13] = 0, [0][0][RTW89_FCC][0] = 60, [0][0][RTW89_ETSI][0] = 30, @@ -12696,7 +12755,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][1] = 127, [0][1][RTW89_ETSI][1] = 127, @@ -12704,7 +12763,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][1] = 127, [0][1][RTW89_KCC][1] = 127, [0][1][RTW89_ACMA][1] = 127, - [0][1][RTW89_CN][1] = 22, + [0][1][RTW89_CN][1] = 127, [0][1][RTW89_UK][1] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -12712,7 
+12771,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 22, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][3] = 127, [0][1][RTW89_ETSI][3] = 127, @@ -12720,7 +12779,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][3] = 127, [0][1][RTW89_KCC][3] = 127, [0][1][RTW89_ACMA][3] = 127, - [0][1][RTW89_CN][3] = 22, + [0][1][RTW89_CN][3] = 127, [0][1][RTW89_UK][3] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -12728,7 +12787,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 22, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][5] = 127, [0][1][RTW89_ETSI][5] = 127, @@ -12736,7 +12795,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][5] = 127, [0][1][RTW89_KCC][5] = 127, [0][1][RTW89_ACMA][5] = 127, - [0][1][RTW89_CN][5] = 22, + [0][1][RTW89_CN][5] = 127, [0][1][RTW89_UK][5] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -12744,7 +12803,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 22, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][7] = 127, [0][1][RTW89_ETSI][7] = 127, @@ -12752,7 +12811,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][7] = 127, [0][1][RTW89_KCC][7] = 127, [0][1][RTW89_ACMA][7] = 127, - [0][1][RTW89_CN][7] = 22, + [0][1][RTW89_CN][7] = 127, [0][1][RTW89_UK][7] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -12760,7 +12819,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 22, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][9] = 127, [0][1][RTW89_ETSI][9] = 127, @@ -12768,7 +12827,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][9] = 127, [0][1][RTW89_KCC][9] = 127, [0][1][RTW89_ACMA][9] = 127, - [0][1][RTW89_CN][9] = 22, + [0][1][RTW89_CN][9] = 127, [0][1][RTW89_UK][9] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -12776,7 +12835,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 22, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][11] = 127, [0][1][RTW89_ETSI][11] = 127, @@ -12784,7 +12843,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][11] = 127, [0][1][RTW89_KCC][11] = 127, [0][1][RTW89_ACMA][11] = 127, - [0][1][RTW89_CN][11] = 22, + [0][1][RTW89_CN][11] = 127, [0][1][RTW89_UK][11] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -12792,7 +12851,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, @@ 
-12920,7 +12979,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 32, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][1] = 127, [1][1][RTW89_ETSI][1] = 127, @@ -12928,7 +12987,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][1] = 127, [1][1][RTW89_KCC][1] = 127, [1][1][RTW89_ACMA][1] = 127, - [1][1][RTW89_CN][1] = 32, + [1][1][RTW89_CN][1] = 127, [1][1][RTW89_UK][1] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -12936,7 +12995,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 32, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][3] = 127, [1][1][RTW89_ETSI][3] = 127, @@ -12944,7 +13003,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][3] = 127, [1][1][RTW89_KCC][3] = 127, [1][1][RTW89_ACMA][3] = 127, - [1][1][RTW89_CN][3] = 32, + [1][1][RTW89_CN][3] = 127, [1][1][RTW89_UK][3] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -12952,7 +13011,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 32, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][5] = 127, [1][1][RTW89_ETSI][5] = 127, @@ -12960,7 +13019,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][5] = 127, [1][1][RTW89_KCC][5] = 127, [1][1][RTW89_ACMA][5] = 127, - [1][1][RTW89_CN][5] = 32, + [1][1][RTW89_CN][5] = 127, [1][1][RTW89_UK][5] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -12968,7 +13027,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 32, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][7] = 127, [1][1][RTW89_ETSI][7] = 127, @@ -12976,7 +13035,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][7] = 127, [1][1][RTW89_KCC][7] = 127, [1][1][RTW89_ACMA][7] = 127, - [1][1][RTW89_CN][7] = 32, + [1][1][RTW89_CN][7] = 127, [1][1][RTW89_UK][7] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -12984,7 +13043,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 32, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][9] = 127, [1][1][RTW89_ETSI][9] = 127, @@ -12992,7 +13051,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][9] = 127, [1][1][RTW89_KCC][9] = 127, [1][1][RTW89_ACMA][9] = 127, - [1][1][RTW89_CN][9] = 32, + [1][1][RTW89_CN][9] = 127, [1][1][RTW89_UK][9] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -13000,7 +13059,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 32, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][11] = 127, [1][1][RTW89_ETSI][11] = 127, @@ -13008,7 
+13067,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][11] = 127, [1][1][RTW89_KCC][11] = 127, [1][1][RTW89_ACMA][11] = 127, - [1][1][RTW89_CN][11] = 32, + [1][1][RTW89_CN][11] = 127, [1][1][RTW89_UK][11] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -13016,7 +13075,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 32, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, @@ -13144,7 +13203,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 44, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][1] = 127, [2][1][RTW89_ETSI][1] = 127, @@ -13152,7 +13211,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][1] = 127, [2][1][RTW89_KCC][1] = 127, [2][1][RTW89_ACMA][1] = 127, - [2][1][RTW89_CN][1] = 44, + [2][1][RTW89_CN][1] = 127, [2][1][RTW89_UK][1] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -13160,7 +13219,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 44, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][3] = 127, [2][1][RTW89_ETSI][3] = 127, @@ -13168,7 +13227,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][3] = 127, [2][1][RTW89_KCC][3] = 127, [2][1][RTW89_ACMA][3] = 127, - [2][1][RTW89_CN][3] = 44, + [2][1][RTW89_CN][3] = 127, [2][1][RTW89_UK][3] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -13176,7 +13235,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 44, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][5] = 127, [2][1][RTW89_ETSI][5] = 127, @@ -13184,7 +13243,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][5] = 127, [2][1][RTW89_KCC][5] = 127, [2][1][RTW89_ACMA][5] = 127, - [2][1][RTW89_CN][5] = 44, + [2][1][RTW89_CN][5] = 127, [2][1][RTW89_UK][5] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -13192,7 +13251,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 44, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][7] = 127, [2][1][RTW89_ETSI][7] = 127, @@ -13200,7 +13259,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][7] = 127, [2][1][RTW89_KCC][7] = 127, [2][1][RTW89_ACMA][7] = 127, - [2][1][RTW89_CN][7] = 44, + [2][1][RTW89_CN][7] = 127, [2][1][RTW89_UK][7] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -13208,7 +13267,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 44, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][9] = 127, [2][1][RTW89_ETSI][9] = 127, @@ -13216,7 
+13275,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][9] = 127, [2][1][RTW89_KCC][9] = 127, [2][1][RTW89_ACMA][9] = 127, - [2][1][RTW89_CN][9] = 44, + [2][1][RTW89_CN][9] = 127, [2][1][RTW89_UK][9] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -13224,7 +13283,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 44, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][11] = 127, [2][1][RTW89_ETSI][11] = 127, @@ -13232,7 +13291,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][11] = 127, [2][1][RTW89_KCC][11] = 127, [2][1][RTW89_ACMA][11] = 127, - [2][1][RTW89_CN][11] = 44, + [2][1][RTW89_CN][11] = 127, [2][1][RTW89_UK][11] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -13240,7 +13299,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 42, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, @@ -13283,14 +13342,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 40, [0][0][RTW89_WW][50] = 42, [0][0][RTW89_WW][52] = 38, - [0][1][RTW89_WW][0] = 4, - [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 4, - [0][1][RTW89_WW][6] = 4, - [0][1][RTW89_WW][8] = 4, - [0][1][RTW89_WW][10] = 4, - [0][1][RTW89_WW][12] = 4, - [0][1][RTW89_WW][14] = 4, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][12] = 0, + [0][1][RTW89_WW][14] = 0, [0][1][RTW89_WW][15] = 0, [0][1][RTW89_WW][17] = 0, [0][1][RTW89_WW][19] = 0, @@ -13303,11 +13362,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][33] = 0, [0][1][RTW89_WW][35] = 0, [0][1][RTW89_WW][37] = 0, - [0][1][RTW89_WW][38] = 42, - [0][1][RTW89_WW][40] = 42, - [0][1][RTW89_WW][42] = 42, - [0][1][RTW89_WW][44] = 42, - [0][1][RTW89_WW][46] = 42, + [0][1][RTW89_WW][38] = 0, + [0][1][RTW89_WW][40] = 0, + [0][1][RTW89_WW][42] = 0, + [0][1][RTW89_WW][44] = 0, + [0][1][RTW89_WW][46] = 0, [0][1][RTW89_WW][48] = 0, [0][1][RTW89_WW][50] = 0, [0][1][RTW89_WW][52] = 0, @@ -13339,14 +13398,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][48] = 52, [1][0][RTW89_WW][50] = 52, [1][0][RTW89_WW][52] = 50, - [1][1][RTW89_WW][0] = 14, - [1][1][RTW89_WW][2] = 14, - [1][1][RTW89_WW][4] = 14, - [1][1][RTW89_WW][6] = 14, - [1][1][RTW89_WW][8] = 14, - [1][1][RTW89_WW][10] = 14, - [1][1][RTW89_WW][12] = 14, - [1][1][RTW89_WW][14] = 14, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][12] = 0, + [1][1][RTW89_WW][14] = 0, [1][1][RTW89_WW][15] = 0, [1][1][RTW89_WW][17] = 0, [1][1][RTW89_WW][19] = 0, @@ -13359,11 +13418,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][33] = 0, [1][1][RTW89_WW][35] = 0, [1][1][RTW89_WW][37] = 0, - [1][1][RTW89_WW][38] = 54, - [1][1][RTW89_WW][40] = 54, - [1][1][RTW89_WW][42] = 54, - [1][1][RTW89_WW][44] = 54, - [1][1][RTW89_WW][46] = 
54, + [1][1][RTW89_WW][38] = 0, + [1][1][RTW89_WW][40] = 0, + [1][1][RTW89_WW][42] = 0, + [1][1][RTW89_WW][44] = 0, + [1][1][RTW89_WW][46] = 0, [1][1][RTW89_WW][48] = 0, [1][1][RTW89_WW][50] = 0, [1][1][RTW89_WW][52] = 0, @@ -13395,14 +13454,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][48] = 62, [2][0][RTW89_WW][50] = 62, [2][0][RTW89_WW][52] = 60, - [2][1][RTW89_WW][0] = 28, - [2][1][RTW89_WW][2] = 28, - [2][1][RTW89_WW][4] = 28, - [2][1][RTW89_WW][6] = 28, - [2][1][RTW89_WW][8] = 28, - [2][1][RTW89_WW][10] = 28, - [2][1][RTW89_WW][12] = 28, - [2][1][RTW89_WW][14] = 28, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][12] = 0, + [2][1][RTW89_WW][14] = 0, [2][1][RTW89_WW][15] = 0, [2][1][RTW89_WW][17] = 0, [2][1][RTW89_WW][19] = 0, @@ -13415,11 +13474,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][33] = 0, [2][1][RTW89_WW][35] = 0, [2][1][RTW89_WW][37] = 0, - [2][1][RTW89_WW][38] = 56, - [2][1][RTW89_WW][40] = 56, - [2][1][RTW89_WW][42] = 56, - [2][1][RTW89_WW][44] = 56, - [2][1][RTW89_WW][46] = 56, + [2][1][RTW89_WW][38] = 0, + [2][1][RTW89_WW][40] = 0, + [2][1][RTW89_WW][42] = 0, + [2][1][RTW89_WW][44] = 0, + [2][1][RTW89_WW][46] = 0, [2][1][RTW89_WW][48] = 0, [2][1][RTW89_WW][50] = 0, [2][1][RTW89_WW][52] = 0, @@ -13653,7 +13712,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 4, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -13661,7 +13720,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 4, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -13669,7 +13728,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 4, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -13677,7 +13736,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 4, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -13685,7 +13744,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 4, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -13693,7 +13752,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 4, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -13701,7 +13760,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, 
[0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 4, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][14] = 127, [0][1][RTW89_ETSI][14] = 127, @@ -13709,7 +13768,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][14] = 127, [0][1][RTW89_KCC][14] = 127, [0][1][RTW89_ACMA][14] = 127, - [0][1][RTW89_CN][14] = 4, + [0][1][RTW89_CN][14] = 127, [0][1][RTW89_UK][14] = 127, [0][1][RTW89_FCC][15] = 127, [0][1][RTW89_ETSI][15] = 127, @@ -13813,7 +13872,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][38] = 127, [0][1][RTW89_KCC][38] = 127, [0][1][RTW89_ACMA][38] = 127, - [0][1][RTW89_CN][38] = 42, + [0][1][RTW89_CN][38] = 127, [0][1][RTW89_UK][38] = 127, [0][1][RTW89_FCC][40] = 127, [0][1][RTW89_ETSI][40] = 127, @@ -13821,7 +13880,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][40] = 127, [0][1][RTW89_KCC][40] = 127, [0][1][RTW89_ACMA][40] = 127, - [0][1][RTW89_CN][40] = 42, + [0][1][RTW89_CN][40] = 127, [0][1][RTW89_UK][40] = 127, [0][1][RTW89_FCC][42] = 127, [0][1][RTW89_ETSI][42] = 127, @@ -13829,7 +13888,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][42] = 127, [0][1][RTW89_KCC][42] = 127, [0][1][RTW89_ACMA][42] = 127, - [0][1][RTW89_CN][42] = 42, + [0][1][RTW89_CN][42] = 127, [0][1][RTW89_UK][42] = 127, [0][1][RTW89_FCC][44] = 127, [0][1][RTW89_ETSI][44] = 127, @@ -13837,7 +13896,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][44] = 127, [0][1][RTW89_KCC][44] = 127, [0][1][RTW89_ACMA][44] = 127, - [0][1][RTW89_CN][44] = 42, + [0][1][RTW89_CN][44] = 127, [0][1][RTW89_UK][44] = 127, [0][1][RTW89_FCC][46] = 127, [0][1][RTW89_ETSI][46] = 127, @@ -13845,7 +13904,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][46] = 127, [0][1][RTW89_KCC][46] = 127, [0][1][RTW89_ACMA][46] = 127, - [0][1][RTW89_CN][46] = 42, + [0][1][RTW89_CN][46] = 127, [0][1][RTW89_UK][46] = 127, [0][1][RTW89_FCC][48] = 127, [0][1][RTW89_ETSI][48] = 127, @@ -14101,7 +14160,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 14, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -14109,7 +14168,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 14, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -14117,7 +14176,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 14, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -14125,7 +14184,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 14, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -14133,7 +14192,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, 
[1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 14, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -14141,7 +14200,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 14, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -14149,7 +14208,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 14, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][14] = 127, [1][1][RTW89_ETSI][14] = 127, @@ -14157,7 +14216,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][14] = 127, [1][1][RTW89_KCC][14] = 127, [1][1][RTW89_ACMA][14] = 127, - [1][1][RTW89_CN][14] = 14, + [1][1][RTW89_CN][14] = 127, [1][1][RTW89_UK][14] = 127, [1][1][RTW89_FCC][15] = 127, [1][1][RTW89_ETSI][15] = 127, @@ -14261,7 +14320,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][38] = 127, [1][1][RTW89_KCC][38] = 127, [1][1][RTW89_ACMA][38] = 127, - [1][1][RTW89_CN][38] = 54, + [1][1][RTW89_CN][38] = 127, [1][1][RTW89_UK][38] = 127, [1][1][RTW89_FCC][40] = 127, [1][1][RTW89_ETSI][40] = 127, @@ -14269,7 +14328,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][40] = 127, [1][1][RTW89_KCC][40] = 127, [1][1][RTW89_ACMA][40] = 127, - [1][1][RTW89_CN][40] = 54, + [1][1][RTW89_CN][40] = 127, [1][1][RTW89_UK][40] = 127, [1][1][RTW89_FCC][42] = 127, [1][1][RTW89_ETSI][42] = 127, @@ -14277,7 +14336,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][42] = 127, [1][1][RTW89_KCC][42] = 127, [1][1][RTW89_ACMA][42] = 127, - [1][1][RTW89_CN][42] = 54, + [1][1][RTW89_CN][42] = 127, [1][1][RTW89_UK][42] = 127, [1][1][RTW89_FCC][44] = 127, [1][1][RTW89_ETSI][44] = 127, @@ -14285,7 +14344,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][44] = 127, [1][1][RTW89_KCC][44] = 127, [1][1][RTW89_ACMA][44] = 127, - [1][1][RTW89_CN][44] = 54, + [1][1][RTW89_CN][44] = 127, [1][1][RTW89_UK][44] = 127, [1][1][RTW89_FCC][46] = 127, [1][1][RTW89_ETSI][46] = 127, @@ -14293,7 +14352,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][46] = 127, [1][1][RTW89_KCC][46] = 127, [1][1][RTW89_ACMA][46] = 127, - [1][1][RTW89_CN][46] = 54, + [1][1][RTW89_CN][46] = 127, [1][1][RTW89_UK][46] = 127, [1][1][RTW89_FCC][48] = 127, [1][1][RTW89_ETSI][48] = 127, @@ -14549,7 +14608,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 28, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -14557,7 +14616,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 28, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -14565,7 +14624,7 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 28, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -14573,7 +14632,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 28, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -14581,7 +14640,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 28, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -14589,7 +14648,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 28, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -14597,7 +14656,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 28, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][14] = 127, [2][1][RTW89_ETSI][14] = 127, @@ -14605,7 +14664,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][14] = 127, [2][1][RTW89_KCC][14] = 127, [2][1][RTW89_ACMA][14] = 127, - [2][1][RTW89_CN][14] = 28, + [2][1][RTW89_CN][14] = 127, [2][1][RTW89_UK][14] = 127, [2][1][RTW89_FCC][15] = 127, [2][1][RTW89_ETSI][15] = 127, @@ -14709,7 +14768,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][38] = 127, [2][1][RTW89_KCC][38] = 127, [2][1][RTW89_ACMA][38] = 127, - [2][1][RTW89_CN][38] = 56, + [2][1][RTW89_CN][38] = 127, [2][1][RTW89_UK][38] = 127, [2][1][RTW89_FCC][40] = 127, [2][1][RTW89_ETSI][40] = 127, @@ -14717,7 +14776,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][40] = 127, [2][1][RTW89_KCC][40] = 127, [2][1][RTW89_ACMA][40] = 127, - [2][1][RTW89_CN][40] = 56, + [2][1][RTW89_CN][40] = 127, [2][1][RTW89_UK][40] = 127, [2][1][RTW89_FCC][42] = 127, [2][1][RTW89_ETSI][42] = 127, @@ -14725,7 +14784,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][42] = 127, [2][1][RTW89_KCC][42] = 127, [2][1][RTW89_ACMA][42] = 127, - [2][1][RTW89_CN][42] = 56, + [2][1][RTW89_CN][42] = 127, [2][1][RTW89_UK][42] = 127, [2][1][RTW89_FCC][44] = 127, [2][1][RTW89_ETSI][44] = 127, @@ -14733,7 +14792,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][44] = 127, [2][1][RTW89_KCC][44] = 127, [2][1][RTW89_ACMA][44] = 127, - [2][1][RTW89_CN][44] = 56, + [2][1][RTW89_CN][44] = 127, [2][1][RTW89_UK][44] = 127, [2][1][RTW89_FCC][46] = 127, [2][1][RTW89_ETSI][46] = 127, @@ -14741,7 +14800,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][46] = 127, [2][1][RTW89_KCC][46] = 127, [2][1][RTW89_ACMA][46] = 127, - [2][1][RTW89_CN][46] = 56, + [2][1][RTW89_CN][46] = 127, [2][1][RTW89_UK][46] = 127, [2][1][RTW89_FCC][48] = 127, 
[2][1][RTW89_ETSI][48] = 127, @@ -14794,12 +14853,20 @@ const struct rtw89_phy_table rtw89_8851b_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8851b_byr_table = { .data = rtw89_8851b_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8851b_txpwr_byrate), .load = rtw89_phy_load_txpwr_byrate, }; +static +const struct rtw89_txpwr_table rtw89_8851b_byr_table_type2 = { + .data = rtw89_8851b_txpwr_byrate_type2, + .size = ARRAY_SIZE(rtw89_8851b_txpwr_byrate_type2), + .load = rtw89_phy_load_txpwr_byrate, +}; + const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg = { .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n, .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p, @@ -14810,6 +14877,7 @@ const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg = { }; const struct rtw89_rfe_parms rtw89_8851b_dflt_parms = { + .byr_tbl = &rtw89_8851b_byr_table, .rule_2ghz = { .lmt = &rtw89_8851b_txpwr_lmt_2g, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_2g, @@ -14818,9 +14886,14 @@ const struct rtw89_rfe_parms rtw89_8851b_dflt_parms = { .lmt = &rtw89_8851b_txpwr_lmt_5g, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_5g, }, + .tx_shape = { + .lmt = &rtw89_8851b_tx_shape_lmt, + .lmt_ru = &rtw89_8851b_tx_shape_lmt_ru, + }, }; static const struct rtw89_rfe_parms rtw89_8851b_rfe_parms_type2 = { + .byr_tbl = &rtw89_8851b_byr_table_type2, .rule_2ghz = { .lmt = &rtw89_8851b_txpwr_lmt_2g_type2, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_2g_type2, @@ -14829,6 +14902,10 @@ static const struct rtw89_rfe_parms rtw89_8851b_rfe_parms_type2 = { .lmt = &rtw89_8851b_txpwr_lmt_5g_type2, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_5g_type2, }, + .tx_shape = { + .lmt = &rtw89_8851b_tx_shape_lmt, + .lmt_ru = &rtw89_8851b_tx_shape_lmt_ru, + }, }; const struct rtw89_rfe_parms_conf rtw89_8851b_rfe_parms_conf[] = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h index a8737de02f..d8cf545d40 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h @@ -11,10 +11,7 @@ extern const struct rtw89_phy_table rtw89_8851b_phy_bb_table; extern const struct rtw89_phy_table rtw89_8851b_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8851b_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8851b_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8851b_byr_table; extern const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg; -extern const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8851b_dflt_parms; extern const struct rtw89_rfe_parms_conf rtw89_8851b_rfe_parms_conf[]; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index d068eae6a2..0c36e6180e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -1624,9 +1624,10 @@ void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev, rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); } -static void rtw8852a_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852a_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852a_btc_preagc_en_defs_tbl : &rtw8852a_btc_preagc_dis_defs_tbl); } @@ -1683,9 +1684,10 @@ void rtw8852a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x0); } -static void rtw8852a_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x3); rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); @@ -1966,15 +1968,15 @@ static void rtw8852a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852a_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852a_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852a_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2025,6 +2027,7 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8852a = { static const struct rtw89_chip_ops rtw8852a_chip_ops = { .enable_bb_rf = rtw89_mac_enable_bb_rf, .disable_bb_rf = rtw89_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852a_bb_reset, .bb_sethw = rtw8852a_bb_sethw, .read_rf = rtw89_phy_read_rf, @@ -2045,9 +2048,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .set_txpwr_ctrl = rtw8852a_set_txpwr_ctrl, .init_txpwr_unit = rtw8852a_init_txpwr_unit, .get_thermal = rtw8852a_get_thermal, - .ctrl_btg = rtw8852a_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx, .query_ppdu = rtw8852a_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx, .cfg_txrx_path = NULL, .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, .pwr_on_func = NULL, @@ -2081,6 +2084,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .fw_basename = RTW8852A_FW_BASENAME, .fw_format_max = RTW8852A_FW_FORMAT_MAX, .try_ce_fw = false, + .bbmcu_nr = 0, .needed_fw_elms = 0, .fifo_size = 458752, .small_fifo_size = false, @@ -2101,7 +2105,6 @@ const struct rtw89_chip_info rtw8852a_chip_info = { &rtw89_8852a_phy_radiob_table,}, .nctl_table = &rtw89_8852a_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852a_byr_table, .dflt_parms = &rtw89_8852a_dflt_parms, .rfe_parms_conf = NULL, .txpwr_factor_rf = 2, @@ -2114,7 +2117,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = false, - .support_ul_tb_ctrl = false, + .ul_tb_waveform_ctrl = false, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 2, .tx_nss = 2, @@ -2157,6 +2161,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8852a_h2c_regs, @@ -2170,6 +2175,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .dcfo_comp_sft = 10, .imr_info = &rtw8852a_imr_info, .rrsr_cfgs = &rtw8852a_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, 
.bss_clr_map_reg = R_BSS_CLR_MAP, .dma_ch_mask = 0, .edcca_lvl_reg = R_SEG0R_EDCCA_LVL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index be54194558..495890c180 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -51020,6 +51020,7 @@ const struct rtw89_phy_table rtw89_8852a_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8852a_byr_table = { .data = rtw89_8852a_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852a_txpwr_byrate), @@ -51049,6 +51050,7 @@ const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table = { }; const struct rtw89_rfe_parms rtw89_8852a_dflt_parms = { + .byr_tbl = &rtw89_8852a_byr_table, .rule_2ghz = { .lmt = &rtw89_8852a_txpwr_lmt_2g, .lmt_ru = &rtw89_8852a_txpwr_lmt_ru_2g, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h index 41c379b104..7463ae6ee3 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h @@ -11,7 +11,6 @@ extern const struct rtw89_phy_table rtw89_8852a_phy_bb_table; extern const struct rtw89_phy_table rtw89_8852a_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852a_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852a_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852a_byr_table; extern const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg; extern const struct rtw89_rfe_parms rtw89_8852a_dflt_parms; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index 0063301952..9d4e6f0821 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -1689,10 +1689,11 @@ static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8852b_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8852b_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1928,15 +1929,17 @@ void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); } -static void rtw8852b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852b_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852b_btc_preagc_en_defs_tbl : &rtw8852b_btc_preagc_dis_defs_tbl); } -static void rtw8852b_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2017,9 +2020,9 @@ void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, if (chan->band_type == RTW89_BAND_2G && (rx_path == RF_B || rx_path == RF_AB)) - rtw8852b_ctrl_btg(rtwdev, true); + rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); else - rtw8852b_ctrl_btg(rtwdev, false); + rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; @@ -2345,15 +2348,15 @@ static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852b_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852b_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852b_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2449,6 +2452,7 @@ static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chip_ops rtw8852b_chip_ops = { .enable_bb_rf = rtw8852b_mac_enable_bb_rf, .disable_bb_rf = rtw8852b_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852b_bb_reset, .bb_sethw = rtw8852b_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2469,9 +2473,9 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl, .init_txpwr_unit = rtw8852b_init_txpwr_unit, .get_thermal = rtw8852b_get_thermal, - .ctrl_btg = rtw8852b_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx, .query_ppdu = rtw8852b_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852b_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852b_pwr_on_func, @@ -2514,6 +2518,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .fw_basename = RTW8852B_FW_BASENAME, .fw_format_max = RTW8852B_FW_FORMAT_MAX, .try_ce_fw = true, + .bbmcu_nr = 0, .needed_fw_elms = 0, .fifo_size = 196608, .small_fifo_size = true, @@ -2534,7 +2539,6 @@ const struct rtw89_chip_info rtw8852b_chip_info = { &rtw89_8852b_phy_radiob_table,}, .nctl_table = &rtw89_8852b_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852b_byr_table, .dflt_parms = &rtw89_8852b_dflt_parms, .rfe_parms_conf = NULL, .txpwr_factor_rf = 2, @@ -2547,7 +2551,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = true, - .support_ul_tb_ctrl = true, + .ul_tb_waveform_ctrl = true, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 2, .tx_nss = 2, @@ -2590,6 +2595,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, 
B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8852b_h2c_regs, @@ -2603,6 +2609,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .dcfo_comp_sft = 10, .imr_info = &rtw8852b_imr_info, .rrsr_cfgs = &rtw8852b_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c index 17124d851a..d2ce16e98b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c @@ -14666,8 +14666,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; -const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8852b_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CHILE] = 0, [0][0][RTW89_CN] = 0, @@ -14706,36 +14707,64 @@ const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [1][1][RTW89_UKRAINE] = 0, }; +static +const u8 rtw89_8852b_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CHILE] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MEXICO] = 3, + [0][RTW89_MKK] = 0, + [0][RTW89_QATAR] = 0, + [0][RTW89_UK] = 0, + [0][RTW89_UKRAINE] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CHILE] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MEXICO] = 3, + [1][RTW89_MKK] = 0, + [1][RTW89_QATAR] = 0, + [1][RTW89_UK] = 0, + [1][RTW89_UKRAINE] = 0, +}; + static const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, [0][0][0][0][RTW89_WW][12] = 56, [0][0][0][0][RTW89_WW][13] = 76, - [0][1][0][0][RTW89_WW][0] = 46, - [0][1][0][0][RTW89_WW][1] = 46, - [0][1][0][0][RTW89_WW][2] = 46, - [0][1][0][0][RTW89_WW][3] = 46, - [0][1][0][0][RTW89_WW][4] = 46, - [0][1][0][0][RTW89_WW][5] = 46, - [0][1][0][0][RTW89_WW][6] = 46, - [0][1][0][0][RTW89_WW][7] = 46, - [0][1][0][0][RTW89_WW][8] = 46, - [0][1][0][0][RTW89_WW][9] = 46, - [0][1][0][0][RTW89_WW][10] = 46, - [0][1][0][0][RTW89_WW][11] = 46, + [0][1][0][0][RTW89_WW][0] = 44, + [0][1][0][0][RTW89_WW][1] = 44, + [0][1][0][0][RTW89_WW][2] = 44, + [0][1][0][0][RTW89_WW][3] = 44, + [0][1][0][0][RTW89_WW][4] = 44, + [0][1][0][0][RTW89_WW][5] = 44, + 
[0][1][0][0][RTW89_WW][6] = 44, + [0][1][0][0][RTW89_WW][7] = 44, + [0][1][0][0][RTW89_WW][8] = 44, + [0][1][0][0][RTW89_WW][9] = 44, + [0][1][0][0][RTW89_WW][10] = 44, + [0][1][0][0][RTW89_WW][11] = 44, [0][1][0][0][RTW89_WW][12] = 42, [0][1][0][0][RTW89_WW][13] = 64, [1][0][0][0][RTW89_WW][0] = 0, @@ -14743,7 +14772,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][2] = 50, [1][0][0][0][RTW89_WW][3] = 50, [1][0][0][0][RTW89_WW][4] = 50, - [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][5] = 56, [1][0][0][0][RTW89_WW][6] = 50, [1][0][0][0][RTW89_WW][7] = 50, [1][0][0][0][RTW89_WW][8] = 50, @@ -14754,10 +14783,10 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][13] = 0, [1][1][0][0][RTW89_WW][0] = 0, [1][1][0][0][RTW89_WW][1] = 0, - [1][1][0][0][RTW89_WW][2] = 46, - [1][1][0][0][RTW89_WW][3] = 46, - [1][1][0][0][RTW89_WW][4] = 46, - [1][1][0][0][RTW89_WW][5] = 46, + [1][1][0][0][RTW89_WW][2] = 44, + [1][1][0][0][RTW89_WW][3] = 44, + [1][1][0][0][RTW89_WW][4] = 44, + [1][1][0][0][RTW89_WW][5] = 44, [1][1][0][0][RTW89_WW][6] = 34, [1][1][0][0][RTW89_WW][7] = 34, [1][1][0][0][RTW89_WW][8] = 34, @@ -14846,7 +14875,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][7] = 58, [1][0][2][0][RTW89_WW][8] = 58, [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][10] = 40, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -14887,7 +14916,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][0] = 64, [0][0][0][0][RTW89_UKRAINE][0] = 58, [0][0][0][0][RTW89_MEXICO][0] = 78, - [0][0][0][0][RTW89_CN][0] = 58, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_QATAR][0] = 58, [0][0][0][0][RTW89_UK][0] = 58, [0][0][0][0][RTW89_FCC][1] = 78, @@ -14899,7 +14928,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][1] = 64, [0][0][0][0][RTW89_UKRAINE][1] = 58, [0][0][0][0][RTW89_MEXICO][1] = 78, - [0][0][0][0][RTW89_CN][1] = 58, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_QATAR][1] = 58, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 78, @@ -14911,7 +14940,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][2] = 64, [0][0][0][0][RTW89_UKRAINE][2] = 58, [0][0][0][0][RTW89_MEXICO][2] = 78, - [0][0][0][0][RTW89_CN][2] = 58, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_QATAR][2] = 58, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 78, @@ -14923,7 +14952,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][3] = 64, [0][0][0][0][RTW89_UKRAINE][3] = 58, [0][0][0][0][RTW89_MEXICO][3] = 78, - [0][0][0][0][RTW89_CN][3] = 58, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_QATAR][3] = 58, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 78, @@ -14935,7 +14964,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][4] = 64, [0][0][0][0][RTW89_UKRAINE][4] = 58, [0][0][0][0][RTW89_MEXICO][4] = 78, - [0][0][0][0][RTW89_CN][4] = 58, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_QATAR][4] = 58, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 78, @@ -14947,7 +14976,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][5] = 64, [0][0][0][0][RTW89_UKRAINE][5] = 58, 
[0][0][0][0][RTW89_MEXICO][5] = 78, - [0][0][0][0][RTW89_CN][5] = 58, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_QATAR][5] = 58, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 78, @@ -14959,7 +14988,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][6] = 64, [0][0][0][0][RTW89_UKRAINE][6] = 58, [0][0][0][0][RTW89_MEXICO][6] = 78, - [0][0][0][0][RTW89_CN][6] = 58, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_QATAR][6] = 58, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 78, @@ -14971,7 +15000,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][7] = 64, [0][0][0][0][RTW89_UKRAINE][7] = 58, [0][0][0][0][RTW89_MEXICO][7] = 78, - [0][0][0][0][RTW89_CN][7] = 58, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_QATAR][7] = 58, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 78, @@ -14983,7 +15012,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][8] = 64, [0][0][0][0][RTW89_UKRAINE][8] = 58, [0][0][0][0][RTW89_MEXICO][8] = 78, - [0][0][0][0][RTW89_CN][8] = 58, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_QATAR][8] = 58, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 78, @@ -14995,7 +15024,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][9] = 64, [0][0][0][0][RTW89_UKRAINE][9] = 58, [0][0][0][0][RTW89_MEXICO][9] = 78, - [0][0][0][0][RTW89_CN][9] = 58, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_QATAR][9] = 58, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 78, @@ -15007,7 +15036,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][10] = 66, [0][0][0][0][RTW89_UKRAINE][10] = 58, [0][0][0][0][RTW89_MEXICO][10] = 78, - [0][0][0][0][RTW89_CN][10] = 58, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_QATAR][10] = 58, [0][0][0][0][RTW89_UK][10] = 58, [0][0][0][0][RTW89_FCC][11] = 70, @@ -15019,7 +15048,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][11] = 64, [0][0][0][0][RTW89_UKRAINE][11] = 58, [0][0][0][0][RTW89_MEXICO][11] = 70, - [0][0][0][0][RTW89_CN][11] = 58, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_QATAR][11] = 58, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 56, @@ -15031,7 +15060,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][12] = 56, [0][0][0][0][RTW89_UKRAINE][12] = 58, [0][0][0][0][RTW89_MEXICO][12] = 56, - [0][0][0][0][RTW89_CN][12] = 58, + [0][0][0][0][RTW89_CN][12] = 56, [0][0][0][0][RTW89_QATAR][12] = 58, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, @@ -15055,7 +15084,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][0] = 50, [0][1][0][0][RTW89_UKRAINE][0] = 46, [0][1][0][0][RTW89_MEXICO][0] = 74, - [0][1][0][0][RTW89_CN][0] = 46, + [0][1][0][0][RTW89_CN][0] = 44, [0][1][0][0][RTW89_QATAR][0] = 46, [0][1][0][0][RTW89_UK][0] = 46, [0][1][0][0][RTW89_FCC][1] = 74, @@ -15067,7 +15096,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][1] = 50, [0][1][0][0][RTW89_UKRAINE][1] = 46, [0][1][0][0][RTW89_MEXICO][1] = 74, - [0][1][0][0][RTW89_CN][1] = 46, + [0][1][0][0][RTW89_CN][1] = 44, [0][1][0][0][RTW89_QATAR][1] = 46, [0][1][0][0][RTW89_UK][1] = 46, [0][1][0][0][RTW89_FCC][2] = 74, @@ -15079,7 +15108,7 @@ const 
s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][2] = 50, [0][1][0][0][RTW89_UKRAINE][2] = 46, [0][1][0][0][RTW89_MEXICO][2] = 74, - [0][1][0][0][RTW89_CN][2] = 46, + [0][1][0][0][RTW89_CN][2] = 44, [0][1][0][0][RTW89_QATAR][2] = 46, [0][1][0][0][RTW89_UK][2] = 46, [0][1][0][0][RTW89_FCC][3] = 74, @@ -15091,7 +15120,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][3] = 50, [0][1][0][0][RTW89_UKRAINE][3] = 46, [0][1][0][0][RTW89_MEXICO][3] = 74, - [0][1][0][0][RTW89_CN][3] = 46, + [0][1][0][0][RTW89_CN][3] = 44, [0][1][0][0][RTW89_QATAR][3] = 46, [0][1][0][0][RTW89_UK][3] = 46, [0][1][0][0][RTW89_FCC][4] = 74, @@ -15103,7 +15132,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][4] = 50, [0][1][0][0][RTW89_UKRAINE][4] = 46, [0][1][0][0][RTW89_MEXICO][4] = 74, - [0][1][0][0][RTW89_CN][4] = 46, + [0][1][0][0][RTW89_CN][4] = 44, [0][1][0][0][RTW89_QATAR][4] = 46, [0][1][0][0][RTW89_UK][4] = 46, [0][1][0][0][RTW89_FCC][5] = 74, @@ -15115,7 +15144,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][5] = 50, [0][1][0][0][RTW89_UKRAINE][5] = 46, [0][1][0][0][RTW89_MEXICO][5] = 74, - [0][1][0][0][RTW89_CN][5] = 46, + [0][1][0][0][RTW89_CN][5] = 44, [0][1][0][0][RTW89_QATAR][5] = 46, [0][1][0][0][RTW89_UK][5] = 46, [0][1][0][0][RTW89_FCC][6] = 74, @@ -15127,7 +15156,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][6] = 52, [0][1][0][0][RTW89_UKRAINE][6] = 46, [0][1][0][0][RTW89_MEXICO][6] = 74, - [0][1][0][0][RTW89_CN][6] = 46, + [0][1][0][0][RTW89_CN][6] = 44, [0][1][0][0][RTW89_QATAR][6] = 46, [0][1][0][0][RTW89_UK][6] = 46, [0][1][0][0][RTW89_FCC][7] = 74, @@ -15139,7 +15168,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][7] = 50, [0][1][0][0][RTW89_UKRAINE][7] = 46, [0][1][0][0][RTW89_MEXICO][7] = 74, - [0][1][0][0][RTW89_CN][7] = 46, + [0][1][0][0][RTW89_CN][7] = 44, [0][1][0][0][RTW89_QATAR][7] = 46, [0][1][0][0][RTW89_UK][7] = 46, [0][1][0][0][RTW89_FCC][8] = 74, @@ -15151,7 +15180,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][8] = 50, [0][1][0][0][RTW89_UKRAINE][8] = 46, [0][1][0][0][RTW89_MEXICO][8] = 74, - [0][1][0][0][RTW89_CN][8] = 46, + [0][1][0][0][RTW89_CN][8] = 44, [0][1][0][0][RTW89_QATAR][8] = 46, [0][1][0][0][RTW89_UK][8] = 46, [0][1][0][0][RTW89_FCC][9] = 74, @@ -15163,7 +15192,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][9] = 50, [0][1][0][0][RTW89_UKRAINE][9] = 46, [0][1][0][0][RTW89_MEXICO][9] = 74, - [0][1][0][0][RTW89_CN][9] = 46, + [0][1][0][0][RTW89_CN][9] = 44, [0][1][0][0][RTW89_QATAR][9] = 46, [0][1][0][0][RTW89_UK][9] = 46, [0][1][0][0][RTW89_FCC][10] = 74, @@ -15175,7 +15204,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][10] = 52, [0][1][0][0][RTW89_UKRAINE][10] = 46, [0][1][0][0][RTW89_MEXICO][10] = 74, - [0][1][0][0][RTW89_CN][10] = 46, + [0][1][0][0][RTW89_CN][10] = 44, [0][1][0][0][RTW89_QATAR][10] = 46, [0][1][0][0][RTW89_UK][10] = 46, [0][1][0][0][RTW89_FCC][11] = 54, @@ -15187,7 +15216,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][11] = 50, [0][1][0][0][RTW89_UKRAINE][11] = 46, [0][1][0][0][RTW89_MEXICO][11] = 54, - [0][1][0][0][RTW89_CN][11] = 46, + [0][1][0][0][RTW89_CN][11] = 44, 
[0][1][0][0][RTW89_QATAR][11] = 46, [0][1][0][0][RTW89_UK][11] = 46, [0][1][0][0][RTW89_FCC][12] = 42, @@ -15199,7 +15228,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][12] = 42, [0][1][0][0][RTW89_UKRAINE][12] = 46, [0][1][0][0][RTW89_MEXICO][12] = 42, - [0][1][0][0][RTW89_CN][12] = 46, + [0][1][0][0][RTW89_CN][12] = 44, [0][1][0][0][RTW89_QATAR][12] = 46, [0][1][0][0][RTW89_UK][12] = 46, [0][1][0][0][RTW89_FCC][13] = 127, @@ -15247,7 +15276,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][2] = 62, [1][0][0][0][RTW89_UKRAINE][2] = 58, [1][0][0][0][RTW89_MEXICO][2] = 50, - [1][0][0][0][RTW89_CN][2] = 58, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_QATAR][2] = 58, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 50, @@ -15259,7 +15288,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][3] = 62, [1][0][0][0][RTW89_UKRAINE][3] = 58, [1][0][0][0][RTW89_MEXICO][3] = 50, - [1][0][0][0][RTW89_CN][3] = 58, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_QATAR][3] = 58, [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 50, @@ -15271,7 +15300,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][4] = 62, [1][0][0][0][RTW89_UKRAINE][4] = 58, [1][0][0][0][RTW89_MEXICO][4] = 50, - [1][0][0][0][RTW89_CN][4] = 58, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_QATAR][4] = 58, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 66, @@ -15283,7 +15312,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][5] = 62, [1][0][0][0][RTW89_UKRAINE][5] = 58, [1][0][0][0][RTW89_MEXICO][5] = 66, - [1][0][0][0][RTW89_CN][5] = 58, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_QATAR][5] = 58, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 50, @@ -15295,7 +15324,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][6] = 62, [1][0][0][0][RTW89_UKRAINE][6] = 58, [1][0][0][0][RTW89_MEXICO][6] = 50, - [1][0][0][0][RTW89_CN][6] = 58, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_QATAR][6] = 58, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 50, @@ -15307,7 +15336,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][7] = 62, [1][0][0][0][RTW89_UKRAINE][7] = 58, [1][0][0][0][RTW89_MEXICO][7] = 50, - [1][0][0][0][RTW89_CN][7] = 58, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_QATAR][7] = 58, [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 50, @@ -15319,7 +15348,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][8] = 62, [1][0][0][0][RTW89_UKRAINE][8] = 58, [1][0][0][0][RTW89_MEXICO][8] = 50, - [1][0][0][0][RTW89_CN][8] = 58, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_QATAR][8] = 58, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 42, @@ -15331,7 +15360,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][9] = 42, [1][0][0][0][RTW89_UKRAINE][9] = 58, [1][0][0][0][RTW89_MEXICO][9] = 42, - [1][0][0][0][RTW89_CN][9] = 58, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_QATAR][9] = 58, [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 30, @@ -15343,7 +15372,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][10] = 30, 
[1][0][0][0][RTW89_UKRAINE][10] = 58, [1][0][0][0][RTW89_MEXICO][10] = 30, - [1][0][0][0][RTW89_CN][10] = 58, + [1][0][0][0][RTW89_CN][10] = 56, [1][0][0][0][RTW89_QATAR][10] = 58, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, @@ -15415,7 +15444,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][2] = 50, [1][1][0][0][RTW89_UKRAINE][2] = 46, [1][1][0][0][RTW89_MEXICO][2] = 46, - [1][1][0][0][RTW89_CN][2] = 46, + [1][1][0][0][RTW89_CN][2] = 44, [1][1][0][0][RTW89_QATAR][2] = 46, [1][1][0][0][RTW89_UK][2] = 46, [1][1][0][0][RTW89_FCC][3] = 46, @@ -15427,7 +15456,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][3] = 50, [1][1][0][0][RTW89_UKRAINE][3] = 46, [1][1][0][0][RTW89_MEXICO][3] = 46, - [1][1][0][0][RTW89_CN][3] = 46, + [1][1][0][0][RTW89_CN][3] = 44, [1][1][0][0][RTW89_QATAR][3] = 46, [1][1][0][0][RTW89_UK][3] = 46, [1][1][0][0][RTW89_FCC][4] = 46, @@ -15439,7 +15468,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][4] = 50, [1][1][0][0][RTW89_UKRAINE][4] = 46, [1][1][0][0][RTW89_MEXICO][4] = 46, - [1][1][0][0][RTW89_CN][4] = 46, + [1][1][0][0][RTW89_CN][4] = 44, [1][1][0][0][RTW89_QATAR][4] = 46, [1][1][0][0][RTW89_UK][4] = 46, [1][1][0][0][RTW89_FCC][5] = 62, @@ -15451,7 +15480,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][5] = 50, [1][1][0][0][RTW89_UKRAINE][5] = 46, [1][1][0][0][RTW89_MEXICO][5] = 62, - [1][1][0][0][RTW89_CN][5] = 46, + [1][1][0][0][RTW89_CN][5] = 44, [1][1][0][0][RTW89_QATAR][5] = 46, [1][1][0][0][RTW89_UK][5] = 46, [1][1][0][0][RTW89_FCC][6] = 34, @@ -15463,7 +15492,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][6] = 50, [1][1][0][0][RTW89_UKRAINE][6] = 46, [1][1][0][0][RTW89_MEXICO][6] = 34, - [1][1][0][0][RTW89_CN][6] = 46, + [1][1][0][0][RTW89_CN][6] = 44, [1][1][0][0][RTW89_QATAR][6] = 46, [1][1][0][0][RTW89_UK][6] = 46, [1][1][0][0][RTW89_FCC][7] = 34, @@ -15475,7 +15504,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][7] = 50, [1][1][0][0][RTW89_UKRAINE][7] = 46, [1][1][0][0][RTW89_MEXICO][7] = 34, - [1][1][0][0][RTW89_CN][7] = 46, + [1][1][0][0][RTW89_CN][7] = 44, [1][1][0][0][RTW89_QATAR][7] = 46, [1][1][0][0][RTW89_UK][7] = 46, [1][1][0][0][RTW89_FCC][8] = 34, @@ -15487,7 +15516,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][8] = 50, [1][1][0][0][RTW89_UKRAINE][8] = 46, [1][1][0][0][RTW89_MEXICO][8] = 34, - [1][1][0][0][RTW89_CN][8] = 46, + [1][1][0][0][RTW89_CN][8] = 44, [1][1][0][0][RTW89_QATAR][8] = 46, [1][1][0][0][RTW89_UK][8] = 46, [1][1][0][0][RTW89_FCC][9] = 30, @@ -15499,7 +15528,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][9] = 30, [1][1][0][0][RTW89_UKRAINE][9] = 46, [1][1][0][0][RTW89_MEXICO][9] = 30, - [1][1][0][0][RTW89_CN][9] = 46, + [1][1][0][0][RTW89_CN][9] = 44, [1][1][0][0][RTW89_QATAR][9] = 46, [1][1][0][0][RTW89_UK][9] = 46, [1][1][0][0][RTW89_FCC][10] = 30, @@ -15511,7 +15540,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][10] = 30, [1][1][0][0][RTW89_UKRAINE][10] = 46, [1][1][0][0][RTW89_MEXICO][10] = 30, - [1][1][0][0][RTW89_CN][10] = 46, + [1][1][0][0][RTW89_CN][10] = 44, [1][1][0][0][RTW89_QATAR][10] = 46, [1][1][0][0][RTW89_UK][10] = 46, [1][1][0][0][RTW89_FCC][11] = 127, 
@@ -16519,7 +16548,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][10] = 66, [1][0][2][0][RTW89_UKRAINE][10] = 58, [1][0][2][0][RTW89_MEXICO][10] = 66, - [1][0][2][0][RTW89_CN][10] = 58, + [1][0][2][0][RTW89_CN][10] = 40, [1][0][2][0][RTW89_QATAR][10] = 58, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, @@ -16687,7 +16716,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][10] = 38, [1][1][2][0][RTW89_UKRAINE][10] = 46, [1][1][2][0][RTW89_MEXICO][10] = 38, - [1][1][2][0][RTW89_CN][10] = 46, + [1][1][2][0][RTW89_CN][10] = 40, [1][1][2][0][RTW89_QATAR][10] = 46, [1][1][2][0][RTW89_UK][10] = 46, [1][1][2][0][RTW89_FCC][11] = 127, @@ -16907,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][8] = 52, [0][0][1][0][RTW89_WW][10] = 52, [0][0][1][0][RTW89_WW][12] = 52, - [0][0][1][0][RTW89_WW][14] = 52, + [0][0][1][0][RTW89_WW][14] = 1, [0][0][1][0][RTW89_WW][15] = 52, [0][0][1][0][RTW89_WW][17] = 52, [0][0][1][0][RTW89_WW][19] = 52, @@ -16928,7 +16957,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][48] = 78, [0][0][1][0][RTW89_WW][50] = 78, [0][0][1][0][RTW89_WW][52] = 78, - [0][1][1][0][RTW89_WW][0] = 30, + [0][1][1][0][RTW89_WW][0] = 1, [0][1][1][0][RTW89_WW][2] = 32, [0][1][1][0][RTW89_WW][4] = 30, [0][1][1][0][RTW89_WW][6] = 30, @@ -17198,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][14] = 78, [0][0][1][0][RTW89_CN][14] = 58, [0][0][1][0][RTW89_QATAR][14] = 58, - [0][0][1][0][RTW89_UK][14] = 58, + [0][0][1][0][RTW89_UK][14] = 1, [0][0][1][0][RTW89_FCC][15] = 76, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 76, @@ -17352,7 +17381,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][38] = 68, [0][0][1][0][RTW89_UKRAINE][38] = 28, [0][0][1][0][RTW89_MEXICO][38] = 78, - [0][0][1][0][RTW89_CN][38] = 76, + [0][0][1][0][RTW89_CN][38] = 62, [0][0][1][0][RTW89_QATAR][38] = 28, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 78, @@ -17364,7 +17393,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][40] = 68, [0][0][1][0][RTW89_UKRAINE][40] = 28, [0][0][1][0][RTW89_MEXICO][40] = 78, - [0][0][1][0][RTW89_CN][40] = 76, + [0][0][1][0][RTW89_CN][40] = 62, [0][0][1][0][RTW89_QATAR][40] = 28, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 78, @@ -17376,7 +17405,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][42] = 66, [0][0][1][0][RTW89_UKRAINE][42] = 28, [0][0][1][0][RTW89_MEXICO][42] = 78, - [0][0][1][0][RTW89_CN][42] = 76, + [0][0][1][0][RTW89_CN][42] = 62, [0][0][1][0][RTW89_QATAR][42] = 28, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 78, @@ -17388,7 +17417,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][44] = 68, [0][0][1][0][RTW89_UKRAINE][44] = 28, [0][0][1][0][RTW89_MEXICO][44] = 78, - [0][0][1][0][RTW89_CN][44] = 76, + [0][0][1][0][RTW89_CN][44] = 62, [0][0][1][0][RTW89_QATAR][44] = 28, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 78, @@ -17400,7 +17429,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][46] = 68, [0][0][1][0][RTW89_UKRAINE][46] = 28, [0][0][1][0][RTW89_MEXICO][46] = 78, - [0][0][1][0][RTW89_CN][46] = 
76, + [0][0][1][0][RTW89_CN][46] = 62, [0][0][1][0][RTW89_QATAR][46] = 28, [0][0][1][0][RTW89_UK][46] = 58, [0][0][1][0][RTW89_FCC][48] = 78, @@ -17450,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][0] = 50, [0][1][1][0][RTW89_CN][0] = 46, [0][1][1][0][RTW89_QATAR][0] = 46, - [0][1][1][0][RTW89_UK][0] = 46, + [0][1][1][0][RTW89_UK][0] = 1, [0][1][1][0][RTW89_FCC][2] = 68, [0][1][1][0][RTW89_ETSI][2] = 46, [0][1][1][0][RTW89_MKK][2] = 48, @@ -17688,7 +17717,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][38] = 48, [0][1][1][0][RTW89_UKRAINE][38] = 16, [0][1][1][0][RTW89_MEXICO][38] = 78, - [0][1][1][0][RTW89_CN][38] = 76, + [0][1][1][0][RTW89_CN][38] = 62, [0][1][1][0][RTW89_QATAR][38] = 16, [0][1][1][0][RTW89_UK][38] = 46, [0][1][1][0][RTW89_FCC][40] = 78, @@ -17700,7 +17729,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][40] = 48, [0][1][1][0][RTW89_UKRAINE][40] = 16, [0][1][1][0][RTW89_MEXICO][40] = 78, - [0][1][1][0][RTW89_CN][40] = 76, + [0][1][1][0][RTW89_CN][40] = 62, [0][1][1][0][RTW89_QATAR][40] = 16, [0][1][1][0][RTW89_UK][40] = 46, [0][1][1][0][RTW89_FCC][42] = 78, @@ -17712,7 +17741,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][42] = 48, [0][1][1][0][RTW89_UKRAINE][42] = 16, [0][1][1][0][RTW89_MEXICO][42] = 78, - [0][1][1][0][RTW89_CN][42] = 76, + [0][1][1][0][RTW89_CN][42] = 62, [0][1][1][0][RTW89_QATAR][42] = 16, [0][1][1][0][RTW89_UK][42] = 46, [0][1][1][0][RTW89_FCC][44] = 78, @@ -17724,7 +17753,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][44] = 48, [0][1][1][0][RTW89_UKRAINE][44] = 16, [0][1][1][0][RTW89_MEXICO][44] = 78, - [0][1][1][0][RTW89_CN][44] = 76, + [0][1][1][0][RTW89_CN][44] = 62, [0][1][1][0][RTW89_QATAR][44] = 16, [0][1][1][0][RTW89_UK][44] = 46, [0][1][1][0][RTW89_FCC][46] = 78, @@ -17736,7 +17765,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][46] = 48, [0][1][1][0][RTW89_UKRAINE][46] = 16, [0][1][1][0][RTW89_MEXICO][46] = 78, - [0][1][1][0][RTW89_CN][46] = 76, + [0][1][1][0][RTW89_CN][46] = 62, [0][1][1][0][RTW89_QATAR][46] = 16, [0][1][1][0][RTW89_UK][46] = 46, [0][1][1][0][RTW89_FCC][48] = 56, @@ -17784,7 +17813,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][0] = 42, [0][0][2][0][RTW89_UKRAINE][0] = 52, [0][0][2][0][RTW89_MEXICO][0] = 62, - [0][0][2][0][RTW89_CN][0] = 60, + [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_QATAR][0] = 60, [0][0][2][0][RTW89_UK][0] = 60, [0][0][2][0][RTW89_FCC][2] = 78, @@ -17796,7 +17825,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][2] = 42, [0][0][2][0][RTW89_UKRAINE][2] = 52, [0][0][2][0][RTW89_MEXICO][2] = 62, - [0][0][2][0][RTW89_CN][2] = 60, + [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_QATAR][2] = 60, [0][0][2][0][RTW89_UK][2] = 60, [0][0][2][0][RTW89_FCC][4] = 78, @@ -17808,7 +17837,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][4] = 42, [0][0][2][0][RTW89_UKRAINE][4] = 52, [0][0][2][0][RTW89_MEXICO][4] = 62, - [0][0][2][0][RTW89_CN][4] = 60, + [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_QATAR][4] = 60, [0][0][2][0][RTW89_UK][4] = 60, [0][0][2][0][RTW89_FCC][6] = 78, @@ -17820,7 +17849,7 @@ const s8 
rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][6] = 42, [0][0][2][0][RTW89_UKRAINE][6] = 52, [0][0][2][0][RTW89_MEXICO][6] = 62, - [0][0][2][0][RTW89_CN][6] = 60, + [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_QATAR][6] = 60, [0][0][2][0][RTW89_UK][6] = 60, [0][0][2][0][RTW89_FCC][8] = 78, @@ -18024,7 +18053,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][38] = 64, [0][0][2][0][RTW89_UKRAINE][38] = 28, [0][0][2][0][RTW89_MEXICO][38] = 78, - [0][0][2][0][RTW89_CN][38] = 76, + [0][0][2][0][RTW89_CN][38] = 62, [0][0][2][0][RTW89_QATAR][38] = 28, [0][0][2][0][RTW89_UK][38] = 60, [0][0][2][0][RTW89_FCC][40] = 78, @@ -18036,7 +18065,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][40] = 64, [0][0][2][0][RTW89_UKRAINE][40] = 28, [0][0][2][0][RTW89_MEXICO][40] = 78, - [0][0][2][0][RTW89_CN][40] = 76, + [0][0][2][0][RTW89_CN][40] = 62, [0][0][2][0][RTW89_QATAR][40] = 28, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 78, @@ -18048,7 +18077,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][42] = 64, [0][0][2][0][RTW89_UKRAINE][42] = 28, [0][0][2][0][RTW89_MEXICO][42] = 78, - [0][0][2][0][RTW89_CN][42] = 76, + [0][0][2][0][RTW89_CN][42] = 62, [0][0][2][0][RTW89_QATAR][42] = 28, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 78, @@ -18060,7 +18089,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][44] = 66, [0][0][2][0][RTW89_UKRAINE][44] = 28, [0][0][2][0][RTW89_MEXICO][44] = 78, - [0][0][2][0][RTW89_CN][44] = 76, + [0][0][2][0][RTW89_CN][44] = 62, [0][0][2][0][RTW89_QATAR][44] = 28, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 78, @@ -18072,7 +18101,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][46] = 66, [0][0][2][0][RTW89_UKRAINE][46] = 28, [0][0][2][0][RTW89_MEXICO][46] = 78, - [0][0][2][0][RTW89_CN][46] = 76, + [0][0][2][0][RTW89_CN][46] = 62, [0][0][2][0][RTW89_QATAR][46] = 28, [0][0][2][0][RTW89_UK][46] = 60, [0][0][2][0][RTW89_FCC][48] = 78, @@ -18120,7 +18149,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][0] = 30, [0][1][2][0][RTW89_UKRAINE][0] = 40, [0][1][2][0][RTW89_MEXICO][0] = 50, - [0][1][2][0][RTW89_CN][0] = 48, + [0][1][2][0][RTW89_CN][0] = 46, [0][1][2][0][RTW89_QATAR][0] = 48, [0][1][2][0][RTW89_UK][0] = 48, [0][1][2][0][RTW89_FCC][2] = 70, @@ -18132,7 +18161,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][2] = 30, [0][1][2][0][RTW89_UKRAINE][2] = 40, [0][1][2][0][RTW89_MEXICO][2] = 50, - [0][1][2][0][RTW89_CN][2] = 48, + [0][1][2][0][RTW89_CN][2] = 46, [0][1][2][0][RTW89_QATAR][2] = 48, [0][1][2][0][RTW89_UK][2] = 48, [0][1][2][0][RTW89_FCC][4] = 70, @@ -18144,7 +18173,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][4] = 30, [0][1][2][0][RTW89_UKRAINE][4] = 40, [0][1][2][0][RTW89_MEXICO][4] = 50, - [0][1][2][0][RTW89_CN][4] = 48, + [0][1][2][0][RTW89_CN][4] = 46, [0][1][2][0][RTW89_QATAR][4] = 48, [0][1][2][0][RTW89_UK][4] = 48, [0][1][2][0][RTW89_FCC][6] = 70, @@ -18156,7 +18185,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][6] = 30, [0][1][2][0][RTW89_UKRAINE][6] = 40, [0][1][2][0][RTW89_MEXICO][6] = 50, - [0][1][2][0][RTW89_CN][6] = 48, + 
[0][1][2][0][RTW89_CN][6] = 46, [0][1][2][0][RTW89_QATAR][6] = 48, [0][1][2][0][RTW89_UK][6] = 48, [0][1][2][0][RTW89_FCC][8] = 70, @@ -18360,7 +18389,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][38] = 50, [0][1][2][0][RTW89_UKRAINE][38] = 16, [0][1][2][0][RTW89_MEXICO][38] = 78, - [0][1][2][0][RTW89_CN][38] = 76, + [0][1][2][0][RTW89_CN][38] = 62, [0][1][2][0][RTW89_QATAR][38] = 16, [0][1][2][0][RTW89_UK][38] = 48, [0][1][2][0][RTW89_FCC][40] = 78, @@ -18372,7 +18401,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][40] = 50, [0][1][2][0][RTW89_UKRAINE][40] = 16, [0][1][2][0][RTW89_MEXICO][40] = 78, - [0][1][2][0][RTW89_CN][40] = 76, + [0][1][2][0][RTW89_CN][40] = 62, [0][1][2][0][RTW89_QATAR][40] = 16, [0][1][2][0][RTW89_UK][40] = 48, [0][1][2][0][RTW89_FCC][42] = 78, @@ -18384,7 +18413,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][42] = 52, [0][1][2][0][RTW89_UKRAINE][42] = 16, [0][1][2][0][RTW89_MEXICO][42] = 78, - [0][1][2][0][RTW89_CN][42] = 76, + [0][1][2][0][RTW89_CN][42] = 62, [0][1][2][0][RTW89_QATAR][42] = 16, [0][1][2][0][RTW89_UK][42] = 48, [0][1][2][0][RTW89_FCC][44] = 78, @@ -18396,7 +18425,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][44] = 52, [0][1][2][0][RTW89_UKRAINE][44] = 16, [0][1][2][0][RTW89_MEXICO][44] = 78, - [0][1][2][0][RTW89_CN][44] = 76, + [0][1][2][0][RTW89_CN][44] = 62, [0][1][2][0][RTW89_QATAR][44] = 16, [0][1][2][0][RTW89_UK][44] = 48, [0][1][2][0][RTW89_FCC][46] = 78, @@ -18408,7 +18437,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][46] = 52, [0][1][2][0][RTW89_UKRAINE][46] = 16, [0][1][2][0][RTW89_MEXICO][46] = 78, - [0][1][2][0][RTW89_CN][46] = 76, + [0][1][2][0][RTW89_CN][46] = 62, [0][1][2][0][RTW89_QATAR][46] = 16, [0][1][2][0][RTW89_UK][46] = 48, [0][1][2][0][RTW89_FCC][48] = 58, @@ -18456,7 +18485,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][0] = 14, [0][1][2][1][RTW89_UKRAINE][0] = 28, [0][1][2][1][RTW89_MEXICO][0] = 50, - [0][1][2][1][RTW89_CN][0] = 36, + [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_QATAR][0] = 36, [0][1][2][1][RTW89_UK][0] = 36, [0][1][2][1][RTW89_FCC][2] = 68, @@ -18468,7 +18497,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][2] = 14, [0][1][2][1][RTW89_UKRAINE][2] = 28, [0][1][2][1][RTW89_MEXICO][2] = 50, - [0][1][2][1][RTW89_CN][2] = 36, + [0][1][2][1][RTW89_CN][2] = 34, [0][1][2][1][RTW89_QATAR][2] = 36, [0][1][2][1][RTW89_UK][2] = 36, [0][1][2][1][RTW89_FCC][4] = 68, @@ -18480,7 +18509,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][4] = 14, [0][1][2][1][RTW89_UKRAINE][4] = 28, [0][1][2][1][RTW89_MEXICO][4] = 50, - [0][1][2][1][RTW89_CN][4] = 36, + [0][1][2][1][RTW89_CN][4] = 34, [0][1][2][1][RTW89_QATAR][4] = 36, [0][1][2][1][RTW89_UK][4] = 36, [0][1][2][1][RTW89_FCC][6] = 68, @@ -18492,7 +18521,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][6] = 14, [0][1][2][1][RTW89_UKRAINE][6] = 28, [0][1][2][1][RTW89_MEXICO][6] = 50, - [0][1][2][1][RTW89_CN][6] = 36, + [0][1][2][1][RTW89_CN][6] = 34, [0][1][2][1][RTW89_QATAR][6] = 36, [0][1][2][1][RTW89_UK][6] = 36, [0][1][2][1][RTW89_FCC][8] = 68, @@ -18696,7 +18725,7 @@ const s8 
rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][38] = 36, [0][1][2][1][RTW89_UKRAINE][38] = 4, [0][1][2][1][RTW89_MEXICO][38] = 78, - [0][1][2][1][RTW89_CN][38] = 72, + [0][1][2][1][RTW89_CN][38] = 62, [0][1][2][1][RTW89_QATAR][38] = 4, [0][1][2][1][RTW89_UK][38] = 36, [0][1][2][1][RTW89_FCC][40] = 78, @@ -18708,7 +18737,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][40] = 36, [0][1][2][1][RTW89_UKRAINE][40] = 4, [0][1][2][1][RTW89_MEXICO][40] = 78, - [0][1][2][1][RTW89_CN][40] = 72, + [0][1][2][1][RTW89_CN][40] = 62, [0][1][2][1][RTW89_QATAR][40] = 4, [0][1][2][1][RTW89_UK][40] = 36, [0][1][2][1][RTW89_FCC][42] = 78, @@ -18720,7 +18749,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][42] = 36, [0][1][2][1][RTW89_UKRAINE][42] = 4, [0][1][2][1][RTW89_MEXICO][42] = 78, - [0][1][2][1][RTW89_CN][42] = 72, + [0][1][2][1][RTW89_CN][42] = 62, [0][1][2][1][RTW89_QATAR][42] = 4, [0][1][2][1][RTW89_UK][42] = 36, [0][1][2][1][RTW89_FCC][44] = 78, @@ -18732,7 +18761,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][44] = 36, [0][1][2][1][RTW89_UKRAINE][44] = 4, [0][1][2][1][RTW89_MEXICO][44] = 78, - [0][1][2][1][RTW89_CN][44] = 76, + [0][1][2][1][RTW89_CN][44] = 62, [0][1][2][1][RTW89_QATAR][44] = 4, [0][1][2][1][RTW89_UK][44] = 36, [0][1][2][1][RTW89_FCC][46] = 78, @@ -18744,7 +18773,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][46] = 36, [0][1][2][1][RTW89_UKRAINE][46] = 4, [0][1][2][1][RTW89_MEXICO][46] = 78, - [0][1][2][1][RTW89_CN][46] = 76, + [0][1][2][1][RTW89_CN][46] = 62, [0][1][2][1][RTW89_QATAR][46] = 4, [0][1][2][1][RTW89_UK][46] = 36, [0][1][2][1][RTW89_FCC][48] = 58, @@ -18912,7 +18941,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][39] = 64, [1][0][2][0][RTW89_UKRAINE][39] = 28, [1][0][2][0][RTW89_MEXICO][39] = 78, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 56, [1][0][2][0][RTW89_QATAR][39] = 28, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 78, @@ -18924,7 +18953,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][43] = 64, [1][0][2][0][RTW89_UKRAINE][43] = 28, [1][0][2][0][RTW89_MEXICO][43] = 78, - [1][0][2][0][RTW89_CN][43] = 74, + [1][0][2][0][RTW89_CN][43] = 62, [1][0][2][0][RTW89_QATAR][43] = 28, [1][0][2][0][RTW89_UK][43] = 62, [1][0][2][0][RTW89_FCC][47] = 78, @@ -19080,7 +19109,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][39] = 52, [1][1][2][0][RTW89_UKRAINE][39] = 16, [1][1][2][0][RTW89_MEXICO][39] = 78, - [1][1][2][0][RTW89_CN][39] = 70, + [1][1][2][0][RTW89_CN][39] = 56, [1][1][2][0][RTW89_QATAR][39] = 16, [1][1][2][0][RTW89_UK][39] = 52, [1][1][2][0][RTW89_FCC][43] = 78, @@ -19092,7 +19121,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][43] = 52, [1][1][2][0][RTW89_UKRAINE][43] = 16, [1][1][2][0][RTW89_MEXICO][43] = 78, - [1][1][2][0][RTW89_CN][43] = 74, + [1][1][2][0][RTW89_CN][43] = 62, [1][1][2][0][RTW89_QATAR][43] = 16, [1][1][2][0][RTW89_UK][43] = 52, [1][1][2][0][RTW89_FCC][47] = 68, @@ -19248,7 +19277,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_CHILE][39] = 36, [1][1][2][1][RTW89_UKRAINE][39] = 4, [1][1][2][1][RTW89_MEXICO][39] = 78, - [1][1][2][1][RTW89_CN][39] = 
70, + [1][1][2][1][RTW89_CN][39] = 58, [1][1][2][1][RTW89_QATAR][39] = 4, [1][1][2][1][RTW89_UK][39] = 40, [1][1][2][1][RTW89_FCC][43] = 78, @@ -19260,7 +19289,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_CHILE][43] = 36, [1][1][2][1][RTW89_UKRAINE][43] = 4, [1][1][2][1][RTW89_MEXICO][43] = 78, - [1][1][2][1][RTW89_CN][43] = 74, + [1][1][2][1][RTW89_CN][43] = 62, [1][1][2][1][RTW89_QATAR][43] = 4, [1][1][2][1][RTW89_UK][43] = 40, [1][1][2][1][RTW89_FCC][47] = 68, @@ -19356,7 +19385,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_CHILE][41] = 64, [2][0][2][0][RTW89_UKRAINE][41] = 28, [2][0][2][0][RTW89_MEXICO][41] = 74, - [2][0][2][0][RTW89_CN][41] = 70, + [2][0][2][0][RTW89_CN][41] = 48, [2][0][2][0][RTW89_QATAR][41] = 28, [2][0][2][0][RTW89_UK][41] = 64, [2][0][2][0][RTW89_FCC][49] = 64, @@ -19440,7 +19469,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_CHILE][41] = 50, [2][1][2][0][RTW89_UKRAINE][41] = 16, [2][1][2][0][RTW89_MEXICO][41] = 74, - [2][1][2][0][RTW89_CN][41] = 70, + [2][1][2][0][RTW89_CN][41] = 48, [2][1][2][0][RTW89_QATAR][41] = 16, [2][1][2][0][RTW89_UK][41] = 52, [2][1][2][0][RTW89_FCC][49] = 58, @@ -19524,7 +19553,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_CHILE][41] = 36, [2][1][2][1][RTW89_UKRAINE][41] = 4, [2][1][2][1][RTW89_MEXICO][41] = 74, - [2][1][2][1][RTW89_CN][41] = 70, + [2][1][2][1][RTW89_CN][41] = 46, [2][1][2][1][RTW89_QATAR][41] = 4, [2][1][2][1][RTW89_UK][41] = 38, [2][1][2][1][RTW89_FCC][49] = 58, @@ -20669,10 +20698,10 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 32, [0][0][RTW89_WW][50] = 32, [0][0][RTW89_WW][52] = 32, - [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][0] = 1, [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 0, - [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][4] = 1, + [0][1][RTW89_WW][6] = 1, [0][1][RTW89_WW][8] = 12, [0][1][RTW89_WW][10] = 12, [0][1][RTW89_WW][12] = 12, @@ -21148,7 +21177,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][0] = 34, [0][1][RTW89_ETSI][0] = 12, [0][1][RTW89_MKK][0] = 12, - [0][1][RTW89_IC][0] = 0, + [0][1][RTW89_IC][0] = 1, [0][1][RTW89_KCC][0] = 28, [0][1][RTW89_ACMA][0] = 12, [0][1][RTW89_CHILE][0] = 14, @@ -21172,7 +21201,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][4] = 34, [0][1][RTW89_ETSI][4] = 12, [0][1][RTW89_MKK][4] = 14, - [0][1][RTW89_IC][4] = 0, + [0][1][RTW89_IC][4] = 1, [0][1][RTW89_KCC][4] = 28, [0][1][RTW89_ACMA][4] = 12, [0][1][RTW89_CHILE][4] = 12, @@ -21184,7 +21213,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][6] = 34, [0][1][RTW89_ETSI][6] = 12, [0][1][RTW89_MKK][6] = 14, - [0][1][RTW89_IC][6] = 0, + [0][1][RTW89_IC][6] = 1, [0][1][RTW89_KCC][6] = 2, [0][1][RTW89_ACMA][6] = 12, [0][1][RTW89_CHILE][6] = 12, @@ -21730,7 +21759,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][38] = 64, [1][0][RTW89_UKRAINE][38] = 28, [1][0][RTW89_MEXICO][38] = 84, - [1][0][RTW89_CN][38] = 74, + [1][0][RTW89_CN][38] = 62, [1][0][RTW89_QATAR][38] = 28, [1][0][RTW89_UK][38] = 38, [1][0][RTW89_FCC][40] = 84, @@ -21742,7 +21771,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][40] = 64, [1][0][RTW89_UKRAINE][40] = 28, [1][0][RTW89_MEXICO][40] = 84, - [1][0][RTW89_CN][40] = 74, + 
[1][0][RTW89_CN][40] = 62, [1][0][RTW89_QATAR][40] = 28, [1][0][RTW89_UK][40] = 38, [1][0][RTW89_FCC][42] = 84, @@ -21754,7 +21783,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][42] = 64, [1][0][RTW89_UKRAINE][42] = 28, [1][0][RTW89_MEXICO][42] = 84, - [1][0][RTW89_CN][42] = 74, + [1][0][RTW89_CN][42] = 62, [1][0][RTW89_QATAR][42] = 28, [1][0][RTW89_UK][42] = 38, [1][0][RTW89_FCC][44] = 84, @@ -21766,7 +21795,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][44] = 64, [1][0][RTW89_UKRAINE][44] = 28, [1][0][RTW89_MEXICO][44] = 84, - [1][0][RTW89_CN][44] = 74, + [1][0][RTW89_CN][44] = 62, [1][0][RTW89_QATAR][44] = 28, [1][0][RTW89_UK][44] = 38, [1][0][RTW89_FCC][46] = 84, @@ -21778,7 +21807,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][46] = 64, [1][0][RTW89_UKRAINE][46] = 28, [1][0][RTW89_MEXICO][46] = 84, - [1][0][RTW89_CN][46] = 74, + [1][0][RTW89_CN][46] = 62, [1][0][RTW89_QATAR][46] = 28, [1][0][RTW89_UK][46] = 38, [1][0][RTW89_FCC][48] = 44, @@ -22402,7 +22431,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][38] = 64, [2][0][RTW89_UKRAINE][38] = 28, [2][0][RTW89_MEXICO][38] = 84, - [2][0][RTW89_CN][38] = 76, + [2][0][RTW89_CN][38] = 62, [2][0][RTW89_QATAR][38] = 28, [2][0][RTW89_UK][38] = 50, [2][0][RTW89_FCC][40] = 84, @@ -22414,7 +22443,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][40] = 64, [2][0][RTW89_UKRAINE][40] = 28, [2][0][RTW89_MEXICO][40] = 84, - [2][0][RTW89_CN][40] = 76, + [2][0][RTW89_CN][40] = 62, [2][0][RTW89_QATAR][40] = 28, [2][0][RTW89_UK][40] = 50, [2][0][RTW89_FCC][42] = 84, @@ -22426,7 +22455,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][42] = 66, [2][0][RTW89_UKRAINE][42] = 28, [2][0][RTW89_MEXICO][42] = 84, - [2][0][RTW89_CN][42] = 76, + [2][0][RTW89_CN][42] = 62, [2][0][RTW89_QATAR][42] = 28, [2][0][RTW89_UK][42] = 50, [2][0][RTW89_FCC][44] = 84, @@ -22438,7 +22467,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][44] = 64, [2][0][RTW89_UKRAINE][44] = 28, [2][0][RTW89_MEXICO][44] = 84, - [2][0][RTW89_CN][44] = 76, + [2][0][RTW89_CN][44] = 62, [2][0][RTW89_QATAR][44] = 28, [2][0][RTW89_UK][44] = 50, [2][0][RTW89_FCC][46] = 84, @@ -22450,7 +22479,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][46] = 64, [2][0][RTW89_UKRAINE][46] = 28, [2][0][RTW89_MEXICO][46] = 84, - [2][0][RTW89_CN][46] = 76, + [2][0][RTW89_CN][46] = 62, [2][0][RTW89_QATAR][46] = 28, [2][0][RTW89_UK][46] = 50, [2][0][RTW89_FCC][48] = 56, @@ -22738,7 +22767,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][38] = 58, [2][1][RTW89_UKRAINE][38] = 16, [2][1][RTW89_MEXICO][38] = 84, - [2][1][RTW89_CN][38] = 64, + [2][1][RTW89_CN][38] = 62, [2][1][RTW89_QATAR][38] = 16, [2][1][RTW89_UK][38] = 38, [2][1][RTW89_FCC][40] = 84, @@ -22750,7 +22779,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][40] = 58, [2][1][RTW89_UKRAINE][40] = 16, [2][1][RTW89_MEXICO][40] = 84, - [2][1][RTW89_CN][40] = 64, + [2][1][RTW89_CN][40] = 62, [2][1][RTW89_QATAR][40] = 16, [2][1][RTW89_UK][40] = 38, [2][1][RTW89_FCC][42] = 84, @@ -22762,7 +22791,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][42] = 58, [2][1][RTW89_UKRAINE][42] = 16, [2][1][RTW89_MEXICO][42] = 84, 
- [2][1][RTW89_CN][42] = 64, + [2][1][RTW89_CN][42] = 62, [2][1][RTW89_QATAR][42] = 16, [2][1][RTW89_UK][42] = 38, [2][1][RTW89_FCC][44] = 84, @@ -22774,7 +22803,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][44] = 58, [2][1][RTW89_UKRAINE][44] = 16, [2][1][RTW89_MEXICO][44] = 84, - [2][1][RTW89_CN][44] = 64, + [2][1][RTW89_CN][44] = 62, [2][1][RTW89_QATAR][44] = 16, [2][1][RTW89_UK][44] = 38, [2][1][RTW89_FCC][46] = 84, @@ -22786,7 +22815,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][46] = 58, [2][1][RTW89_UKRAINE][46] = 16, [2][1][RTW89_MEXICO][46] = 84, - [2][1][RTW89_CN][46] = 64, + [2][1][RTW89_CN][46] = 62, [2][1][RTW89_QATAR][46] = 16, [2][1][RTW89_UK][46] = 38, [2][1][RTW89_FCC][48] = 44, @@ -22859,6 +22888,7 @@ const struct rtw89_phy_table rtw89_8852b_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8852b_byr_table = { .data = rtw89_8852b_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852b_txpwr_byrate), @@ -22881,6 +22911,7 @@ const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg = { }; const struct rtw89_rfe_parms rtw89_8852b_dflt_parms = { + .byr_tbl = &rtw89_8852b_byr_table, .rule_2ghz = { .lmt = &rtw89_8852b_txpwr_lmt_2g, .lmt_ru = &rtw89_8852b_txpwr_lmt_ru_2g, @@ -22889,4 +22920,8 @@ const struct rtw89_rfe_parms rtw89_8852b_dflt_parms = { .lmt = &rtw89_8852b_txpwr_lmt_5g, .lmt_ru = &rtw89_8852b_txpwr_lmt_ru_5g, }, + .tx_shape = { + .lmt = &rtw89_8852b_tx_shape_lmt, + .lmt_ru = &rtw89_8852b_tx_shape_lmt_ru, + }, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h index 7ef217629f..da6c90e2ba 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h @@ -12,10 +12,7 @@ extern const struct rtw89_phy_table rtw89_8852b_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8852b_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852b_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852b_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852b_byr_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg; -extern const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8852b_dflt_parms; #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 1e16cc0a05..3b7d8ab39b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "fw.h" @@ -166,7 +167,9 @@ static const struct rtw89_dig_regs rtw8852c_dig_regs = { B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, }; -static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg); +static void rtw8852c_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, enum rtw89_mac_idx mac_idx); @@ -1650,7 +1653,8 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev, } rtw8852c_spur_elimination(rtwdev, chan, pri_ch_idx, phy_idx); - rtw8852c_ctrl_btg(rtwdev, chan->band_type == RTW89_BAND_2G); + rtw8852c_ctrl_btg_bt_rx(rtwdev, chan->band_type == RTW89_BAND_2G, + RTW89_PHY_0); rtw8852c_5m_mask(rtwdev, chan, phy_idx); if 
(chan->band_width == RTW89_CHANNEL_WIDTH_160 && @@ -1776,6 +1780,7 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) rtwdev->is_tssi_mode[RF_PATH_B] = false; memset(rfk_mcc, 0, sizeof(*rfk_mcc)); rtw8852c_lck_init(rtwdev); + rtw8852c_dpk_init(rtwdev); rtw8852c_rck(rtwdev); rtw8852c_dack(rtwdev); @@ -1964,10 +1969,11 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8852c_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1975,6 +1981,11 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev, rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev, (enum rtw89_mac_idx)phy_idx, tx_shape_ofdm); + + rtw89_phy_write32_set(rtwdev, R_P0_DAC_COMP_POST_DPD_EN, + B_P0_DAC_COMP_POST_DPD_EN); + rtw89_phy_write32_set(rtwdev, R_P1_DAC_COMP_POST_DPD_EN, + B_P1_DAC_COMP_POST_DPD_EN); } static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev, @@ -2142,7 +2153,8 @@ static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path) 1); rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - rtw8852c_ctrl_btg(rtwdev, band == RTW89_BAND_2G); + rtw8852c_ctrl_btg_bt_rx(rtwdev, band == RTW89_BAND_2G, + RTW89_PHY_0); rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, @@ -2218,9 +2230,10 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, } } -static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852c_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (bt_en) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1, B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x3); rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1, @@ -2338,9 +2351,10 @@ static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev) } } -static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852c_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2650,15 +2664,15 @@ static void rtw8852c_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852c_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852c_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852c_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2743,6 +2757,10 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) return 0; } +static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = { + .callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852c_rfk_chanctx_cb, +}; + #ifdef CONFIG_PM static const struct wiphy_wowlan_support 
rtw_wowlan_stub_8852c = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, @@ -2755,6 +2773,7 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = { static const struct rtw89_chip_ops rtw8852c_chip_ops = { .enable_bb_rf = rtw8852c_mac_enable_bb_rf, .disable_bb_rf = rtw8852c_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852c_bb_reset, .bb_sethw = rtw8852c_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2775,9 +2794,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, .init_txpwr_unit = rtw8852c_init_txpwr_unit, .get_thermal = rtw8852c_get_thermal, - .ctrl_btg = rtw8852c_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx, .query_ppdu = rtw8852c_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852c_pwr_on_func, @@ -2811,6 +2830,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .fw_basename = RTW8852C_FW_BASENAME, .fw_format_max = RTW8852C_FW_FORMAT_MAX, .try_ce_fw = false, + .bbmcu_nr = 0, .needed_fw_elms = 0, .fifo_size = 458752, .small_fifo_size = false, @@ -2831,21 +2851,22 @@ const struct rtw89_chip_info rtw8852c_chip_info = { &rtw89_8852c_phy_radioa_table,}, .nctl_table = &rtw89_8852c_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852c_byr_table, .dflt_parms = &rtw89_8852c_dflt_parms, .rfe_parms_conf = NULL, + .chanctx_listener = &rtw8852c_chanctx_listener, .txpwr_factor_rf = 2, .txpwr_factor_mac = 1, .dig_table = NULL, .dig_regs = &rtw8852c_dig_regs, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, - .support_chanctx_num = 1, + .support_chanctx_num = 2, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), .support_bw160 = true, .support_unii4 = true, - .support_ul_tb_ctrl = false, + .ul_tb_waveform_ctrl = false, + .ul_tb_pwr_diff = true, .hw_sec_hdr = true, .rf_path_num = 2, .tx_nss = 2, @@ -2889,6 +2910,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), .txwd_body_size = sizeof(struct rtw89_txwd_body_v1), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8852c_h2c_regs, @@ -2902,6 +2924,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dcfo_comp_sft = 12, .imr_info = &rtw8852c_imr_info, .rrsr_cfgs = &rtw8852c_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP, .dma_ch_mask = 0, .edcca_lvl_reg = R_SEG0R_EDCCA_LVL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index de7714f871..654e3e5507 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "phy.h" @@ -2893,18 +2894,37 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_bandwidth bw = chan->band_width; enum rtw89_band band = chan->band_type; + u32 clk = 0x0; rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl); - if (path == 
RF_PATH_A) + switch (bw) { + case RTW89_CHANNEL_WIDTH_80: + clk = 0x1; + break; + case RTW89_CHANNEL_WIDTH_80_80: + case RTW89_CHANNEL_WIDTH_160: + clk = 0x2; + break; + default: + break; + } + + if (path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ADC_CLK, + B_P0_TSSI_ADC_CLK, clk); rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, &rtw8852c_tssi_sys_defs_2g_a_tbl, &rtw8852c_tssi_sys_defs_5g_a_tbl); - else + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ADC_CLK, + B_P1_TSSI_ADC_CLK, clk); rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, &rtw8852c_tssi_sys_defs_2g_b_tbl, &rtw8852c_tssi_sys_defs_5g_b_tbl); + } } static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, @@ -4049,21 +4069,53 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; - u8 idx = rfk_mcc->table_idx; - int i; + DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {}; + const struct rtw89_chan *chan; + enum rtw89_entity_mode mode; + u8 chan_idx; + u8 idx; + u8 i; - for (i = 0; i < RTW89_IQK_CHS_NR; i++) { - if (rfk_mcc->ch[idx] == 0) - break; - if (++idx >= RTW89_IQK_CHS_NR) - idx = 0; + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC_PREPARE: + chan_idx = RTW89_SUB_ENTITY_1; + break; + default: + chan_idx = RTW89_SUB_ENTITY_0; + break; + } + + for (i = 0; i <= chan_idx; i++) { + chan = rtw89_chan_get(rtwdev, i); + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + if (rfk_mcc->ch[idx] == chan->channel && + rfk_mcc->band[idx] == chan->band_type) { + if (i != chan_idx) { + set_bit(idx, map); + break; + } + + goto bottom; + } + } + } + + idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR); + if (idx == RTW89_IQK_CHS_NR) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "%s: no empty rfk table; force replace the first\n", + __func__); + idx = 0; } - rfk_mcc->table_idx = idx; rfk_mcc->ch[idx] = chan->channel; rfk_mcc->band[idx] = chan->band_type; + +bottom: + rfk_mcc->table_idx = idx; } void rtw8852c_rck(struct rtw89_dev *rtwdev) @@ -4213,6 +4265,14 @@ trigger_rx_dck: rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } +void rtw8852c_dpk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + dpk->is_dpk_enable = true; + dpk->is_dpk_reload_en = false; +} + void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { u32 tx_en; @@ -4222,8 +4282,6 @@ void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); - rtwdev->dpk.is_dpk_enable = true; - rtwdev->dpk.is_dpk_reload_en = false; _dpk(rtwdev, phy_idx, false); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); @@ -4361,3 +4419,26 @@ void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, else rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false); } + +void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 path; + + switch (state) { + case RTW89_CHANCTX_STATE_MCC_START: + dpk->is_dpk_enable = false; + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) + _dpk_onoff(rtwdev, path, false); + break; + case RTW89_CHANCTX_STATE_MCC_STOP: + dpk->is_dpk_enable = true; + for (path = 0; path < RTW8852C_DPK_RF_PATH; 
path++) + _dpk_onoff(rtwdev, path, false); + rtw8852c_dpk(rtwdev, RTW89_PHY_0); + break; + default: + break; + } +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 928a587cdd..6605137e61 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -13,6 +13,7 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev); +void rtw8852c_dpk_init(struct rtw89_dev *rtwdev); void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_dpk_track(struct rtw89_dev *rtwdev); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); @@ -25,5 +26,7 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_lck_init(struct rtw89_dev *rtwdev); void rtw8852c_lck_track(struct rtw89_dev *rtwdev); +void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state); #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c index d727d528b3..e5b0c2a686 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c @@ -165,11 +165,11 @@ static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = { RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5), RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5), RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), - RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19), - RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c), + RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1313), + RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x13), RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), - RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), - RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x00410041), + RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x0041), RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), @@ -222,7 +222,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000), RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), - RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c18e8), RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280), RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080), RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03), @@ -251,7 +251,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), - RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00), RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000), @@ -260,14 +260,14 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a); static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { - RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + 
RTW89_DECL_RFK_WM(0x766c, 0x00001000, 0x0), RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000), RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), - RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c18e8), RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280), RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080), RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03), @@ -296,7 +296,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), - RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00), RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000), @@ -511,9 +511,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x07d), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a); @@ -531,9 +531,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x07d), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b); @@ -551,9 +551,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x080), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a); @@ -571,9 +571,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x080), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c 
b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index 4b272fdf1f..ab1a0aadc8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -31525,8 +31525,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 }; -const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8852c_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CHILE] = 0, [0][0][RTW89_CN] = 0, @@ -31537,6 +31538,7 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [0][0][RTW89_MEXICO] = 1, [0][0][RTW89_MKK] = 0, [0][0][RTW89_QATAR] = 0, + [0][0][RTW89_THAILAND] = 0, [0][0][RTW89_UK] = 0, [0][0][RTW89_UKRAINE] = 0, [0][1][RTW89_ACMA] = 0, @@ -31549,6 +31551,7 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [0][1][RTW89_MEXICO] = 3, [0][1][RTW89_MKK] = 0, [0][1][RTW89_QATAR] = 0, + [0][1][RTW89_THAILAND] = 0, [0][1][RTW89_UK] = 0, [0][1][RTW89_UKRAINE] = 0, [1][1][RTW89_ACMA] = 0, @@ -31561,6 +31564,7 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [1][1][RTW89_MEXICO] = 3, [1][1][RTW89_MKK] = 0, [1][1][RTW89_QATAR] = 0, + [1][1][RTW89_THAILAND] = 0, [1][1][RTW89_UK] = 0, [1][1][RTW89_UKRAINE] = 0, [2][1][RTW89_ACMA] = 0, @@ -31571,25 +31575,66 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [2][1][RTW89_KCC] = 0, [2][1][RTW89_MKK] = 0, [2][1][RTW89_QATAR] = 0, + [2][1][RTW89_THAILAND] = 0, [2][1][RTW89_UK] = 0, }; +static +const u8 rtw89_8852c_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CHILE] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MEXICO] = 3, + [0][RTW89_MKK] = 0, + [0][RTW89_QATAR] = 0, + [0][RTW89_THAILAND] = 0, + [0][RTW89_UK] = 0, + [0][RTW89_UKRAINE] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CHILE] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MEXICO] = 3, + [1][RTW89_MKK] = 0, + [1][RTW89_QATAR] = 0, + [1][RTW89_THAILAND] = 0, + [1][RTW89_UK] = 0, + [1][RTW89_UKRAINE] = 0, + [2][RTW89_ACMA] = 0, + [2][RTW89_CHILE] = 0, + [2][RTW89_ETSI] = 0, + [2][RTW89_FCC] = 0, + [2][RTW89_IC] = 0, + [2][RTW89_KCC] = 0, + [2][RTW89_MKK] = 0, + [2][RTW89_QATAR] = 0, + [2][RTW89_THAILAND] = 0, + [2][RTW89_UK] = 0, +}; + static const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, [0][0][0][0][RTW89_WW][12] = 46, 
[0][0][0][0][RTW89_WW][13] = 72, [0][1][0][0][RTW89_WW][0] = 42, @@ -31609,9 +31654,9 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][0] = 0, [1][0][0][0][RTW89_WW][1] = 0, [1][0][0][0][RTW89_WW][2] = 44, - [1][0][0][0][RTW89_WW][3] = 58, - [1][0][0][0][RTW89_WW][4] = 58, - [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][3] = 56, + [1][0][0][0][RTW89_WW][4] = 56, + [1][0][0][0][RTW89_WW][5] = 56, [1][0][0][0][RTW89_WW][6] = 46, [1][0][0][0][RTW89_WW][7] = 46, [1][0][0][0][RTW89_WW][8] = 28, @@ -31622,10 +31667,10 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][13] = 0, [1][1][0][0][RTW89_WW][0] = 0, [1][1][0][0][RTW89_WW][1] = 0, - [1][1][0][0][RTW89_WW][2] = 46, - [1][1][0][0][RTW89_WW][3] = 46, - [1][1][0][0][RTW89_WW][4] = 46, - [1][1][0][0][RTW89_WW][5] = 46, + [1][1][0][0][RTW89_WW][2] = 44, + [1][1][0][0][RTW89_WW][3] = 44, + [1][1][0][0][RTW89_WW][4] = 44, + [1][1][0][0][RTW89_WW][5] = 44, [1][1][0][0][RTW89_WW][6] = 40, [1][1][0][0][RTW89_WW][7] = 40, [1][1][0][0][RTW89_WW][8] = 14, @@ -31646,7 +31691,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 58, [0][0][1][0][RTW89_WW][10] = 58, [0][0][1][0][RTW89_WW][11] = 58, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 46, [0][1][1][0][RTW89_WW][1] = 46, @@ -31690,7 +31735,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][11] = 46, [0][1][2][0][RTW89_WW][12] = 16, [0][1][2][0][RTW89_WW][13] = 0, - [0][1][2][1][RTW89_WW][0] = 36, + [0][1][2][1][RTW89_WW][0] = 34, [0][1][2][1][RTW89_WW][1] = 34, [0][1][2][1][RTW89_WW][2] = 34, [0][1][2][1][RTW89_WW][3] = 34, @@ -31742,7 +31787,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][7] = 34, [1][1][2][1][RTW89_WW][8] = 34, [1][1][2][1][RTW89_WW][9] = 34, - [1][1][2][1][RTW89_WW][10] = 36, + [1][1][2][1][RTW89_WW][10] = 34, [1][1][2][1][RTW89_WW][11] = 0, [1][1][2][1][RTW89_WW][12] = 0, [1][1][2][1][RTW89_WW][13] = 0, @@ -31752,156 +31797,169 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 76, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 60, - [0][0][0][0][RTW89_CN][0] = 58, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_UK][0] = 60, [0][0][0][0][RTW89_MEXICO][0] = 76, [0][0][0][0][RTW89_UKRAINE][0] = 60, [0][0][0][0][RTW89_CHILE][0] = 76, [0][0][0][0][RTW89_QATAR][0] = 60, + [0][0][0][0][RTW89_THAILAND][0] = 60, [0][0][0][0][RTW89_FCC][1] = 76, [0][0][0][0][RTW89_ETSI][1] = 60, [0][0][0][0][RTW89_MKK][1] = 68, [0][0][0][0][RTW89_IC][1] = 76, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 60, - [0][0][0][0][RTW89_CN][1] = 58, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_UK][1] = 60, [0][0][0][0][RTW89_MEXICO][1] = 76, [0][0][0][0][RTW89_UKRAINE][1] = 60, [0][0][0][0][RTW89_CHILE][1] = 68, [0][0][0][0][RTW89_QATAR][1] = 60, + [0][0][0][0][RTW89_THAILAND][1] = 60, [0][0][0][0][RTW89_FCC][2] = 76, [0][0][0][0][RTW89_ETSI][2] = 60, [0][0][0][0][RTW89_MKK][2] = 68, [0][0][0][0][RTW89_IC][2] = 76, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 60, - [0][0][0][0][RTW89_CN][2] = 58, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_UK][2] = 60, [0][0][0][0][RTW89_MEXICO][2] = 76, [0][0][0][0][RTW89_UKRAINE][2] = 60, [0][0][0][0][RTW89_CHILE][2] = 68, [0][0][0][0][RTW89_QATAR][2] 
= 60, + [0][0][0][0][RTW89_THAILAND][2] = 60, [0][0][0][0][RTW89_FCC][3] = 76, [0][0][0][0][RTW89_ETSI][3] = 60, [0][0][0][0][RTW89_MKK][3] = 68, [0][0][0][0][RTW89_IC][3] = 76, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 60, - [0][0][0][0][RTW89_CN][3] = 58, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_UK][3] = 60, [0][0][0][0][RTW89_MEXICO][3] = 76, [0][0][0][0][RTW89_UKRAINE][3] = 60, [0][0][0][0][RTW89_CHILE][3] = 68, [0][0][0][0][RTW89_QATAR][3] = 60, + [0][0][0][0][RTW89_THAILAND][3] = 60, [0][0][0][0][RTW89_FCC][4] = 76, [0][0][0][0][RTW89_ETSI][4] = 60, [0][0][0][0][RTW89_MKK][4] = 68, [0][0][0][0][RTW89_IC][4] = 76, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 60, - [0][0][0][0][RTW89_CN][4] = 58, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_UK][4] = 60, [0][0][0][0][RTW89_MEXICO][4] = 76, [0][0][0][0][RTW89_UKRAINE][4] = 60, [0][0][0][0][RTW89_CHILE][4] = 68, [0][0][0][0][RTW89_QATAR][4] = 60, + [0][0][0][0][RTW89_THAILAND][4] = 60, [0][0][0][0][RTW89_FCC][5] = 76, [0][0][0][0][RTW89_ETSI][5] = 60, [0][0][0][0][RTW89_MKK][5] = 68, [0][0][0][0][RTW89_IC][5] = 76, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 60, - [0][0][0][0][RTW89_CN][5] = 58, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_UK][5] = 60, [0][0][0][0][RTW89_MEXICO][5] = 76, [0][0][0][0][RTW89_UKRAINE][5] = 60, [0][0][0][0][RTW89_CHILE][5] = 76, [0][0][0][0][RTW89_QATAR][5] = 60, + [0][0][0][0][RTW89_THAILAND][5] = 60, [0][0][0][0][RTW89_FCC][6] = 76, [0][0][0][0][RTW89_ETSI][6] = 60, [0][0][0][0][RTW89_MKK][6] = 68, [0][0][0][0][RTW89_IC][6] = 76, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 60, - [0][0][0][0][RTW89_CN][6] = 58, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_UK][6] = 60, [0][0][0][0][RTW89_MEXICO][6] = 76, [0][0][0][0][RTW89_UKRAINE][6] = 60, [0][0][0][0][RTW89_CHILE][6] = 76, [0][0][0][0][RTW89_QATAR][6] = 60, + [0][0][0][0][RTW89_THAILAND][6] = 60, [0][0][0][0][RTW89_FCC][7] = 76, [0][0][0][0][RTW89_ETSI][7] = 60, [0][0][0][0][RTW89_MKK][7] = 68, [0][0][0][0][RTW89_IC][7] = 76, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 60, - [0][0][0][0][RTW89_CN][7] = 58, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_UK][7] = 60, [0][0][0][0][RTW89_MEXICO][7] = 76, [0][0][0][0][RTW89_UKRAINE][7] = 60, [0][0][0][0][RTW89_CHILE][7] = 76, [0][0][0][0][RTW89_QATAR][7] = 60, + [0][0][0][0][RTW89_THAILAND][7] = 60, [0][0][0][0][RTW89_FCC][8] = 76, [0][0][0][0][RTW89_ETSI][8] = 60, [0][0][0][0][RTW89_MKK][8] = 68, [0][0][0][0][RTW89_IC][8] = 76, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 60, - [0][0][0][0][RTW89_CN][8] = 58, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_UK][8] = 60, [0][0][0][0][RTW89_MEXICO][8] = 76, [0][0][0][0][RTW89_UKRAINE][8] = 60, [0][0][0][0][RTW89_CHILE][8] = 76, [0][0][0][0][RTW89_QATAR][8] = 60, + [0][0][0][0][RTW89_THAILAND][8] = 60, [0][0][0][0][RTW89_FCC][9] = 76, [0][0][0][0][RTW89_ETSI][9] = 60, [0][0][0][0][RTW89_MKK][9] = 68, [0][0][0][0][RTW89_IC][9] = 76, [0][0][0][0][RTW89_KCC][9] = 70, [0][0][0][0][RTW89_ACMA][9] = 60, - [0][0][0][0][RTW89_CN][9] = 58, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_UK][9] = 60, [0][0][0][0][RTW89_MEXICO][9] = 76, [0][0][0][0][RTW89_UKRAINE][9] = 60, [0][0][0][0][RTW89_CHILE][9] = 76, [0][0][0][0][RTW89_QATAR][9] = 60, + [0][0][0][0][RTW89_THAILAND][9] = 60, [0][0][0][0][RTW89_FCC][10] = 76, [0][0][0][0][RTW89_ETSI][10] = 60, [0][0][0][0][RTW89_MKK][10] = 68, [0][0][0][0][RTW89_IC][10] = 
76, [0][0][0][0][RTW89_KCC][10] = 70, [0][0][0][0][RTW89_ACMA][10] = 60, - [0][0][0][0][RTW89_CN][10] = 58, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_UK][10] = 60, [0][0][0][0][RTW89_MEXICO][10] = 76, [0][0][0][0][RTW89_UKRAINE][10] = 60, [0][0][0][0][RTW89_CHILE][10] = 76, [0][0][0][0][RTW89_QATAR][10] = 60, + [0][0][0][0][RTW89_THAILAND][10] = 60, [0][0][0][0][RTW89_FCC][11] = 58, [0][0][0][0][RTW89_ETSI][11] = 60, [0][0][0][0][RTW89_MKK][11] = 68, [0][0][0][0][RTW89_IC][11] = 58, [0][0][0][0][RTW89_KCC][11] = 70, [0][0][0][0][RTW89_ACMA][11] = 60, - [0][0][0][0][RTW89_CN][11] = 58, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_UK][11] = 60, [0][0][0][0][RTW89_MEXICO][11] = 58, [0][0][0][0][RTW89_UKRAINE][11] = 60, [0][0][0][0][RTW89_CHILE][11] = 58, [0][0][0][0][RTW89_QATAR][11] = 60, + [0][0][0][0][RTW89_THAILAND][11] = 60, [0][0][0][0][RTW89_FCC][12] = 46, [0][0][0][0][RTW89_ETSI][12] = 60, [0][0][0][0][RTW89_MKK][12] = 68, [0][0][0][0][RTW89_IC][12] = 46, [0][0][0][0][RTW89_KCC][12] = 70, [0][0][0][0][RTW89_ACMA][12] = 60, - [0][0][0][0][RTW89_CN][12] = 58, + [0][0][0][0][RTW89_CN][12] = 56, [0][0][0][0][RTW89_UK][12] = 60, [0][0][0][0][RTW89_MEXICO][12] = 46, [0][0][0][0][RTW89_UKRAINE][12] = 60, [0][0][0][0][RTW89_CHILE][12] = 46, [0][0][0][0][RTW89_QATAR][12] = 60, + [0][0][0][0][RTW89_THAILAND][12] = 60, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, [0][0][0][0][RTW89_MKK][13] = 72, @@ -31914,6 +31972,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_UKRAINE][13] = 127, [0][0][0][0][RTW89_CHILE][13] = 127, [0][0][0][0][RTW89_QATAR][13] = 127, + [0][0][0][0][RTW89_THAILAND][13] = 127, [0][1][0][0][RTW89_FCC][0] = 76, [0][1][0][0][RTW89_ETSI][0] = 48, [0][1][0][0][RTW89_MKK][0] = 58, @@ -31926,6 +31985,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][0] = 48, [0][1][0][0][RTW89_CHILE][0] = 76, [0][1][0][0][RTW89_QATAR][0] = 48, + [0][1][0][0][RTW89_THAILAND][0] = 48, [0][1][0][0][RTW89_FCC][1] = 76, [0][1][0][0][RTW89_ETSI][1] = 48, [0][1][0][0][RTW89_MKK][1] = 58, @@ -31938,6 +31998,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][1] = 48, [0][1][0][0][RTW89_CHILE][1] = 54, [0][1][0][0][RTW89_QATAR][1] = 48, + [0][1][0][0][RTW89_THAILAND][1] = 48, [0][1][0][0][RTW89_FCC][2] = 76, [0][1][0][0][RTW89_ETSI][2] = 48, [0][1][0][0][RTW89_MKK][2] = 58, @@ -31950,6 +32011,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][2] = 48, [0][1][0][0][RTW89_CHILE][2] = 54, [0][1][0][0][RTW89_QATAR][2] = 48, + [0][1][0][0][RTW89_THAILAND][2] = 48, [0][1][0][0][RTW89_FCC][3] = 76, [0][1][0][0][RTW89_ETSI][3] = 48, [0][1][0][0][RTW89_MKK][3] = 58, @@ -31962,6 +32024,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][3] = 48, [0][1][0][0][RTW89_CHILE][3] = 54, [0][1][0][0][RTW89_QATAR][3] = 48, + [0][1][0][0][RTW89_THAILAND][3] = 48, [0][1][0][0][RTW89_FCC][4] = 76, [0][1][0][0][RTW89_ETSI][4] = 48, [0][1][0][0][RTW89_MKK][4] = 58, @@ -31974,6 +32037,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][4] = 48, [0][1][0][0][RTW89_CHILE][4] = 54, [0][1][0][0][RTW89_QATAR][4] = 48, + [0][1][0][0][RTW89_THAILAND][4] = 48, [0][1][0][0][RTW89_FCC][5] = 76, [0][1][0][0][RTW89_ETSI][5] = 48, [0][1][0][0][RTW89_MKK][5] = 58, @@ -31986,6 +32050,7 @@ const s8 
rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][5] = 48, [0][1][0][0][RTW89_CHILE][5] = 76, [0][1][0][0][RTW89_QATAR][5] = 48, + [0][1][0][0][RTW89_THAILAND][5] = 48, [0][1][0][0][RTW89_FCC][6] = 76, [0][1][0][0][RTW89_ETSI][6] = 48, [0][1][0][0][RTW89_MKK][6] = 58, @@ -31998,6 +32063,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][6] = 48, [0][1][0][0][RTW89_CHILE][6] = 76, [0][1][0][0][RTW89_QATAR][6] = 48, + [0][1][0][0][RTW89_THAILAND][6] = 48, [0][1][0][0][RTW89_FCC][7] = 76, [0][1][0][0][RTW89_ETSI][7] = 48, [0][1][0][0][RTW89_MKK][7] = 58, @@ -32010,6 +32076,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][7] = 48, [0][1][0][0][RTW89_CHILE][7] = 76, [0][1][0][0][RTW89_QATAR][7] = 48, + [0][1][0][0][RTW89_THAILAND][7] = 48, [0][1][0][0][RTW89_FCC][8] = 76, [0][1][0][0][RTW89_ETSI][8] = 48, [0][1][0][0][RTW89_MKK][8] = 58, @@ -32022,6 +32089,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][8] = 48, [0][1][0][0][RTW89_CHILE][8] = 76, [0][1][0][0][RTW89_QATAR][8] = 48, + [0][1][0][0][RTW89_THAILAND][8] = 48, [0][1][0][0][RTW89_FCC][9] = 70, [0][1][0][0][RTW89_ETSI][9] = 48, [0][1][0][0][RTW89_MKK][9] = 58, @@ -32034,6 +32102,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][9] = 48, [0][1][0][0][RTW89_CHILE][9] = 70, [0][1][0][0][RTW89_QATAR][9] = 48, + [0][1][0][0][RTW89_THAILAND][9] = 48, [0][1][0][0][RTW89_FCC][10] = 72, [0][1][0][0][RTW89_ETSI][10] = 48, [0][1][0][0][RTW89_MKK][10] = 58, @@ -32046,6 +32115,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][10] = 48, [0][1][0][0][RTW89_CHILE][10] = 72, [0][1][0][0][RTW89_QATAR][10] = 48, + [0][1][0][0][RTW89_THAILAND][10] = 48, [0][1][0][0][RTW89_FCC][11] = 44, [0][1][0][0][RTW89_ETSI][11] = 48, [0][1][0][0][RTW89_MKK][11] = 58, @@ -32058,6 +32128,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][11] = 48, [0][1][0][0][RTW89_CHILE][11] = 44, [0][1][0][0][RTW89_QATAR][11] = 48, + [0][1][0][0][RTW89_THAILAND][11] = 48, [0][1][0][0][RTW89_FCC][12] = 18, [0][1][0][0][RTW89_ETSI][12] = 48, [0][1][0][0][RTW89_MKK][12] = 58, @@ -32070,6 +32141,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][12] = 48, [0][1][0][0][RTW89_CHILE][12] = 18, [0][1][0][0][RTW89_QATAR][12] = 48, + [0][1][0][0][RTW89_THAILAND][12] = 48, [0][1][0][0][RTW89_FCC][13] = 127, [0][1][0][0][RTW89_ETSI][13] = 127, [0][1][0][0][RTW89_MKK][13] = 60, @@ -32082,6 +32154,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][13] = 127, [0][1][0][0][RTW89_CHILE][13] = 127, [0][1][0][0][RTW89_QATAR][13] = 127, + [0][1][0][0][RTW89_THAILAND][13] = 127, [1][0][0][0][RTW89_FCC][0] = 127, [1][0][0][0][RTW89_ETSI][0] = 127, [1][0][0][0][RTW89_MKK][0] = 127, @@ -32094,6 +32167,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][0] = 127, [1][0][0][0][RTW89_CHILE][0] = 127, [1][0][0][0][RTW89_QATAR][0] = 127, + [1][0][0][0][RTW89_THAILAND][0] = 127, [1][0][0][0][RTW89_FCC][1] = 127, [1][0][0][0][RTW89_ETSI][1] = 127, [1][0][0][0][RTW89_MKK][1] = 127, @@ -32106,114 +32180,124 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][1] = 127, [1][0][0][0][RTW89_CHILE][1] = 127, 
[1][0][0][0][RTW89_QATAR][1] = 127, + [1][0][0][0][RTW89_THAILAND][1] = 127, [1][0][0][0][RTW89_FCC][2] = 44, [1][0][0][0][RTW89_ETSI][2] = 60, [1][0][0][0][RTW89_MKK][2] = 66, [1][0][0][0][RTW89_IC][2] = 44, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 60, - [1][0][0][0][RTW89_CN][2] = 58, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_UK][2] = 60, [1][0][0][0][RTW89_MEXICO][2] = 44, [1][0][0][0][RTW89_UKRAINE][2] = 60, [1][0][0][0][RTW89_CHILE][2] = 44, [1][0][0][0][RTW89_QATAR][2] = 60, + [1][0][0][0][RTW89_THAILAND][2] = 60, [1][0][0][0][RTW89_FCC][3] = 60, [1][0][0][0][RTW89_ETSI][3] = 60, [1][0][0][0][RTW89_MKK][3] = 66, [1][0][0][0][RTW89_IC][3] = 60, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 60, - [1][0][0][0][RTW89_CN][3] = 58, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_UK][3] = 60, [1][0][0][0][RTW89_MEXICO][3] = 60, [1][0][0][0][RTW89_UKRAINE][3] = 60, [1][0][0][0][RTW89_CHILE][3] = 60, [1][0][0][0][RTW89_QATAR][3] = 60, + [1][0][0][0][RTW89_THAILAND][3] = 60, [1][0][0][0][RTW89_FCC][4] = 60, [1][0][0][0][RTW89_ETSI][4] = 60, [1][0][0][0][RTW89_MKK][4] = 66, [1][0][0][0][RTW89_IC][4] = 60, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 60, - [1][0][0][0][RTW89_CN][4] = 58, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_UK][4] = 60, [1][0][0][0][RTW89_MEXICO][4] = 60, [1][0][0][0][RTW89_UKRAINE][4] = 60, [1][0][0][0][RTW89_CHILE][4] = 60, [1][0][0][0][RTW89_QATAR][4] = 60, + [1][0][0][0][RTW89_THAILAND][4] = 60, [1][0][0][0][RTW89_FCC][5] = 62, [1][0][0][0][RTW89_ETSI][5] = 60, [1][0][0][0][RTW89_MKK][5] = 66, [1][0][0][0][RTW89_IC][5] = 62, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 60, - [1][0][0][0][RTW89_CN][5] = 58, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_UK][5] = 60, [1][0][0][0][RTW89_MEXICO][5] = 62, [1][0][0][0][RTW89_UKRAINE][5] = 60, [1][0][0][0][RTW89_CHILE][5] = 62, [1][0][0][0][RTW89_QATAR][5] = 60, + [1][0][0][0][RTW89_THAILAND][5] = 60, [1][0][0][0][RTW89_FCC][6] = 46, [1][0][0][0][RTW89_ETSI][6] = 60, [1][0][0][0][RTW89_MKK][6] = 66, [1][0][0][0][RTW89_IC][6] = 46, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 60, - [1][0][0][0][RTW89_CN][6] = 58, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_UK][6] = 60, [1][0][0][0][RTW89_MEXICO][6] = 46, [1][0][0][0][RTW89_UKRAINE][6] = 60, [1][0][0][0][RTW89_CHILE][6] = 46, [1][0][0][0][RTW89_QATAR][6] = 60, + [1][0][0][0][RTW89_THAILAND][6] = 60, [1][0][0][0][RTW89_FCC][7] = 46, [1][0][0][0][RTW89_ETSI][7] = 60, [1][0][0][0][RTW89_MKK][7] = 66, [1][0][0][0][RTW89_IC][7] = 46, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 60, - [1][0][0][0][RTW89_CN][7] = 58, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_UK][7] = 60, [1][0][0][0][RTW89_MEXICO][7] = 46, [1][0][0][0][RTW89_UKRAINE][7] = 60, [1][0][0][0][RTW89_CHILE][7] = 46, [1][0][0][0][RTW89_QATAR][7] = 60, + [1][0][0][0][RTW89_THAILAND][7] = 60, [1][0][0][0][RTW89_FCC][8] = 28, [1][0][0][0][RTW89_ETSI][8] = 60, [1][0][0][0][RTW89_MKK][8] = 66, [1][0][0][0][RTW89_IC][8] = 28, [1][0][0][0][RTW89_KCC][8] = 70, [1][0][0][0][RTW89_ACMA][8] = 60, - [1][0][0][0][RTW89_CN][8] = 58, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_UK][8] = 60, [1][0][0][0][RTW89_MEXICO][8] = 28, [1][0][0][0][RTW89_UKRAINE][8] = 60, [1][0][0][0][RTW89_CHILE][8] = 28, [1][0][0][0][RTW89_QATAR][8] = 60, + [1][0][0][0][RTW89_THAILAND][8] = 60, [1][0][0][0][RTW89_FCC][9] = 26, [1][0][0][0][RTW89_ETSI][9] = 60, [1][0][0][0][RTW89_MKK][9] = 66, 
[1][0][0][0][RTW89_IC][9] = 26, [1][0][0][0][RTW89_KCC][9] = 70, [1][0][0][0][RTW89_ACMA][9] = 60, - [1][0][0][0][RTW89_CN][9] = 58, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_UK][9] = 60, [1][0][0][0][RTW89_MEXICO][9] = 26, [1][0][0][0][RTW89_UKRAINE][9] = 60, [1][0][0][0][RTW89_CHILE][9] = 26, [1][0][0][0][RTW89_QATAR][9] = 60, + [1][0][0][0][RTW89_THAILAND][9] = 60, [1][0][0][0][RTW89_FCC][10] = 26, [1][0][0][0][RTW89_ETSI][10] = 60, [1][0][0][0][RTW89_MKK][10] = 66, [1][0][0][0][RTW89_IC][10] = 26, [1][0][0][0][RTW89_KCC][10] = 70, [1][0][0][0][RTW89_ACMA][10] = 60, - [1][0][0][0][RTW89_CN][10] = 58, + [1][0][0][0][RTW89_CN][10] = 56, [1][0][0][0][RTW89_UK][10] = 60, [1][0][0][0][RTW89_MEXICO][10] = 26, [1][0][0][0][RTW89_UKRAINE][10] = 60, [1][0][0][0][RTW89_CHILE][10] = 26, [1][0][0][0][RTW89_QATAR][10] = 60, + [1][0][0][0][RTW89_THAILAND][10] = 60, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, [1][0][0][0][RTW89_MKK][11] = 127, @@ -32226,6 +32310,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][11] = 127, [1][0][0][0][RTW89_CHILE][11] = 127, [1][0][0][0][RTW89_QATAR][11] = 127, + [1][0][0][0][RTW89_THAILAND][11] = 127, [1][0][0][0][RTW89_FCC][12] = 127, [1][0][0][0][RTW89_ETSI][12] = 127, [1][0][0][0][RTW89_MKK][12] = 127, @@ -32238,6 +32323,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][12] = 127, [1][0][0][0][RTW89_CHILE][12] = 127, [1][0][0][0][RTW89_QATAR][12] = 127, + [1][0][0][0][RTW89_THAILAND][12] = 127, [1][0][0][0][RTW89_FCC][13] = 127, [1][0][0][0][RTW89_ETSI][13] = 127, [1][0][0][0][RTW89_MKK][13] = 127, @@ -32250,6 +32336,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][13] = 127, [1][0][0][0][RTW89_CHILE][13] = 127, [1][0][0][0][RTW89_QATAR][13] = 127, + [1][0][0][0][RTW89_THAILAND][13] = 127, [1][1][0][0][RTW89_FCC][0] = 127, [1][1][0][0][RTW89_ETSI][0] = 127, [1][1][0][0][RTW89_MKK][0] = 127, @@ -32262,6 +32349,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][0] = 127, [1][1][0][0][RTW89_CHILE][0] = 127, [1][1][0][0][RTW89_QATAR][0] = 127, + [1][1][0][0][RTW89_THAILAND][0] = 127, [1][1][0][0][RTW89_FCC][1] = 127, [1][1][0][0][RTW89_ETSI][1] = 127, [1][1][0][0][RTW89_MKK][1] = 127, @@ -32274,114 +32362,124 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][1] = 127, [1][1][0][0][RTW89_CHILE][1] = 127, [1][1][0][0][RTW89_QATAR][1] = 127, + [1][1][0][0][RTW89_THAILAND][1] = 127, [1][1][0][0][RTW89_FCC][2] = 46, [1][1][0][0][RTW89_ETSI][2] = 48, [1][1][0][0][RTW89_MKK][2] = 58, [1][1][0][0][RTW89_IC][2] = 46, [1][1][0][0][RTW89_KCC][2] = 56, [1][1][0][0][RTW89_ACMA][2] = 48, - [1][1][0][0][RTW89_CN][2] = 46, + [1][1][0][0][RTW89_CN][2] = 44, [1][1][0][0][RTW89_UK][2] = 48, [1][1][0][0][RTW89_MEXICO][2] = 46, [1][1][0][0][RTW89_UKRAINE][2] = 48, [1][1][0][0][RTW89_CHILE][2] = 46, [1][1][0][0][RTW89_QATAR][2] = 48, + [1][1][0][0][RTW89_THAILAND][2] = 48, [1][1][0][0][RTW89_FCC][3] = 46, [1][1][0][0][RTW89_ETSI][3] = 48, [1][1][0][0][RTW89_MKK][3] = 58, [1][1][0][0][RTW89_IC][3] = 46, [1][1][0][0][RTW89_KCC][3] = 56, [1][1][0][0][RTW89_ACMA][3] = 48, - [1][1][0][0][RTW89_CN][3] = 46, + [1][1][0][0][RTW89_CN][3] = 44, [1][1][0][0][RTW89_UK][3] = 48, [1][1][0][0][RTW89_MEXICO][3] = 46, [1][1][0][0][RTW89_UKRAINE][3] = 48, [1][1][0][0][RTW89_CHILE][3] = 46, [1][1][0][0][RTW89_QATAR][3] = 48, + 
[1][1][0][0][RTW89_THAILAND][3] = 48, [1][1][0][0][RTW89_FCC][4] = 46, [1][1][0][0][RTW89_ETSI][4] = 48, [1][1][0][0][RTW89_MKK][4] = 58, [1][1][0][0][RTW89_IC][4] = 46, [1][1][0][0][RTW89_KCC][4] = 56, [1][1][0][0][RTW89_ACMA][4] = 48, - [1][1][0][0][RTW89_CN][4] = 46, + [1][1][0][0][RTW89_CN][4] = 44, [1][1][0][0][RTW89_UK][4] = 48, [1][1][0][0][RTW89_MEXICO][4] = 46, [1][1][0][0][RTW89_UKRAINE][4] = 48, [1][1][0][0][RTW89_CHILE][4] = 46, [1][1][0][0][RTW89_QATAR][4] = 48, + [1][1][0][0][RTW89_THAILAND][4] = 48, [1][1][0][0][RTW89_FCC][5] = 48, [1][1][0][0][RTW89_ETSI][5] = 48, [1][1][0][0][RTW89_MKK][5] = 58, [1][1][0][0][RTW89_IC][5] = 48, [1][1][0][0][RTW89_KCC][5] = 56, [1][1][0][0][RTW89_ACMA][5] = 48, - [1][1][0][0][RTW89_CN][5] = 46, + [1][1][0][0][RTW89_CN][5] = 44, [1][1][0][0][RTW89_UK][5] = 48, [1][1][0][0][RTW89_MEXICO][5] = 48, [1][1][0][0][RTW89_UKRAINE][5] = 48, [1][1][0][0][RTW89_CHILE][5] = 48, [1][1][0][0][RTW89_QATAR][5] = 48, + [1][1][0][0][RTW89_THAILAND][5] = 48, [1][1][0][0][RTW89_FCC][6] = 40, [1][1][0][0][RTW89_ETSI][6] = 48, [1][1][0][0][RTW89_MKK][6] = 58, [1][1][0][0][RTW89_IC][6] = 40, [1][1][0][0][RTW89_KCC][6] = 56, [1][1][0][0][RTW89_ACMA][6] = 48, - [1][1][0][0][RTW89_CN][6] = 46, + [1][1][0][0][RTW89_CN][6] = 44, [1][1][0][0][RTW89_UK][6] = 48, [1][1][0][0][RTW89_MEXICO][6] = 40, [1][1][0][0][RTW89_UKRAINE][6] = 48, [1][1][0][0][RTW89_CHILE][6] = 40, [1][1][0][0][RTW89_QATAR][6] = 48, + [1][1][0][0][RTW89_THAILAND][6] = 48, [1][1][0][0][RTW89_FCC][7] = 40, [1][1][0][0][RTW89_ETSI][7] = 48, [1][1][0][0][RTW89_MKK][7] = 58, [1][1][0][0][RTW89_IC][7] = 40, [1][1][0][0][RTW89_KCC][7] = 56, [1][1][0][0][RTW89_ACMA][7] = 48, - [1][1][0][0][RTW89_CN][7] = 46, + [1][1][0][0][RTW89_CN][7] = 44, [1][1][0][0][RTW89_UK][7] = 48, [1][1][0][0][RTW89_MEXICO][7] = 40, [1][1][0][0][RTW89_UKRAINE][7] = 48, [1][1][0][0][RTW89_CHILE][7] = 40, [1][1][0][0][RTW89_QATAR][7] = 48, + [1][1][0][0][RTW89_THAILAND][7] = 48, [1][1][0][0][RTW89_FCC][8] = 14, [1][1][0][0][RTW89_ETSI][8] = 48, [1][1][0][0][RTW89_MKK][8] = 58, [1][1][0][0][RTW89_IC][8] = 14, [1][1][0][0][RTW89_KCC][8] = 58, [1][1][0][0][RTW89_ACMA][8] = 48, - [1][1][0][0][RTW89_CN][8] = 46, + [1][1][0][0][RTW89_CN][8] = 44, [1][1][0][0][RTW89_UK][8] = 48, [1][1][0][0][RTW89_MEXICO][8] = 14, [1][1][0][0][RTW89_UKRAINE][8] = 48, [1][1][0][0][RTW89_CHILE][8] = 14, [1][1][0][0][RTW89_QATAR][8] = 48, + [1][1][0][0][RTW89_THAILAND][8] = 48, [1][1][0][0][RTW89_FCC][9] = 14, [1][1][0][0][RTW89_ETSI][9] = 48, [1][1][0][0][RTW89_MKK][9] = 58, [1][1][0][0][RTW89_IC][9] = 14, [1][1][0][0][RTW89_KCC][9] = 58, [1][1][0][0][RTW89_ACMA][9] = 48, - [1][1][0][0][RTW89_CN][9] = 46, + [1][1][0][0][RTW89_CN][9] = 44, [1][1][0][0][RTW89_UK][9] = 48, [1][1][0][0][RTW89_MEXICO][9] = 14, [1][1][0][0][RTW89_UKRAINE][9] = 48, [1][1][0][0][RTW89_CHILE][9] = 14, [1][1][0][0][RTW89_QATAR][9] = 48, + [1][1][0][0][RTW89_THAILAND][9] = 48, [1][1][0][0][RTW89_FCC][10] = 12, [1][1][0][0][RTW89_ETSI][10] = 48, [1][1][0][0][RTW89_MKK][10] = 56, [1][1][0][0][RTW89_IC][10] = 12, [1][1][0][0][RTW89_KCC][10] = 58, [1][1][0][0][RTW89_ACMA][10] = 48, - [1][1][0][0][RTW89_CN][10] = 46, + [1][1][0][0][RTW89_CN][10] = 44, [1][1][0][0][RTW89_UK][10] = 48, [1][1][0][0][RTW89_MEXICO][10] = 12, [1][1][0][0][RTW89_UKRAINE][10] = 48, [1][1][0][0][RTW89_CHILE][10] = 12, [1][1][0][0][RTW89_QATAR][10] = 48, + [1][1][0][0][RTW89_THAILAND][10] = 48, [1][1][0][0][RTW89_FCC][11] = 127, [1][1][0][0][RTW89_ETSI][11] = 127, [1][1][0][0][RTW89_MKK][11] = 127, @@ -32394,6 +32492,7 
@@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][11] = 127, [1][1][0][0][RTW89_CHILE][11] = 127, [1][1][0][0][RTW89_QATAR][11] = 127, + [1][1][0][0][RTW89_THAILAND][11] = 127, [1][1][0][0][RTW89_FCC][12] = 127, [1][1][0][0][RTW89_ETSI][12] = 127, [1][1][0][0][RTW89_MKK][12] = 127, @@ -32406,6 +32505,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][12] = 127, [1][1][0][0][RTW89_CHILE][12] = 127, [1][1][0][0][RTW89_QATAR][12] = 127, + [1][1][0][0][RTW89_THAILAND][12] = 127, [1][1][0][0][RTW89_FCC][13] = 127, [1][1][0][0][RTW89_ETSI][13] = 127, [1][1][0][0][RTW89_MKK][13] = 127, @@ -32418,6 +32518,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][13] = 127, [1][1][0][0][RTW89_CHILE][13] = 127, [1][1][0][0][RTW89_QATAR][13] = 127, + [1][1][0][0][RTW89_THAILAND][13] = 127, [0][0][1][0][RTW89_FCC][0] = 66, [0][0][1][0][RTW89_ETSI][0] = 60, [0][0][1][0][RTW89_MKK][0] = 76, @@ -32430,6 +32531,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][0] = 60, [0][0][1][0][RTW89_CHILE][0] = 66, [0][0][1][0][RTW89_QATAR][0] = 60, + [0][0][1][0][RTW89_THAILAND][0] = 60, [0][0][1][0][RTW89_FCC][1] = 68, [0][0][1][0][RTW89_ETSI][1] = 60, [0][0][1][0][RTW89_MKK][1] = 78, @@ -32442,6 +32544,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][1] = 60, [0][0][1][0][RTW89_CHILE][1] = 68, [0][0][1][0][RTW89_QATAR][1] = 60, + [0][0][1][0][RTW89_THAILAND][1] = 60, [0][0][1][0][RTW89_FCC][2] = 72, [0][0][1][0][RTW89_ETSI][2] = 60, [0][0][1][0][RTW89_MKK][2] = 78, @@ -32454,6 +32557,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][2] = 60, [0][0][1][0][RTW89_CHILE][2] = 62, [0][0][1][0][RTW89_QATAR][2] = 60, + [0][0][1][0][RTW89_THAILAND][2] = 60, [0][0][1][0][RTW89_FCC][3] = 76, [0][0][1][0][RTW89_ETSI][3] = 60, [0][0][1][0][RTW89_MKK][3] = 78, @@ -32466,6 +32570,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][3] = 60, [0][0][1][0][RTW89_CHILE][3] = 62, [0][0][1][0][RTW89_QATAR][3] = 60, + [0][0][1][0][RTW89_THAILAND][3] = 60, [0][0][1][0][RTW89_FCC][4] = 80, [0][0][1][0][RTW89_ETSI][4] = 60, [0][0][1][0][RTW89_MKK][4] = 78, @@ -32478,6 +32583,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][4] = 60, [0][0][1][0][RTW89_CHILE][4] = 62, [0][0][1][0][RTW89_QATAR][4] = 60, + [0][0][1][0][RTW89_THAILAND][4] = 60, [0][0][1][0][RTW89_FCC][5] = 80, [0][0][1][0][RTW89_ETSI][5] = 60, [0][0][1][0][RTW89_MKK][5] = 78, @@ -32490,6 +32596,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][5] = 60, [0][0][1][0][RTW89_CHILE][5] = 80, [0][0][1][0][RTW89_QATAR][5] = 60, + [0][0][1][0][RTW89_THAILAND][5] = 60, [0][0][1][0][RTW89_FCC][6] = 80, [0][0][1][0][RTW89_ETSI][6] = 60, [0][0][1][0][RTW89_MKK][6] = 76, @@ -32502,6 +32609,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][6] = 60, [0][0][1][0][RTW89_CHILE][6] = 70, [0][0][1][0][RTW89_QATAR][6] = 60, + [0][0][1][0][RTW89_THAILAND][6] = 60, [0][0][1][0][RTW89_FCC][7] = 80, [0][0][1][0][RTW89_ETSI][7] = 60, [0][0][1][0][RTW89_MKK][7] = 78, @@ -32514,6 +32622,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][7] = 60, [0][0][1][0][RTW89_CHILE][7] = 70, 
[0][0][1][0][RTW89_QATAR][7] = 60, + [0][0][1][0][RTW89_THAILAND][7] = 60, [0][0][1][0][RTW89_FCC][8] = 80, [0][0][1][0][RTW89_ETSI][8] = 60, [0][0][1][0][RTW89_MKK][8] = 78, @@ -32526,6 +32635,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][8] = 60, [0][0][1][0][RTW89_CHILE][8] = 70, [0][0][1][0][RTW89_QATAR][8] = 60, + [0][0][1][0][RTW89_THAILAND][8] = 60, [0][0][1][0][RTW89_FCC][9] = 76, [0][0][1][0][RTW89_ETSI][9] = 60, [0][0][1][0][RTW89_MKK][9] = 78, @@ -32538,6 +32648,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][9] = 60, [0][0][1][0][RTW89_CHILE][9] = 76, [0][0][1][0][RTW89_QATAR][9] = 60, + [0][0][1][0][RTW89_THAILAND][9] = 60, [0][0][1][0][RTW89_FCC][10] = 66, [0][0][1][0][RTW89_ETSI][10] = 60, [0][0][1][0][RTW89_MKK][10] = 78, @@ -32550,6 +32661,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][10] = 60, [0][0][1][0][RTW89_CHILE][10] = 66, [0][0][1][0][RTW89_QATAR][10] = 60, + [0][0][1][0][RTW89_THAILAND][10] = 60, [0][0][1][0][RTW89_FCC][11] = 62, [0][0][1][0][RTW89_ETSI][11] = 60, [0][0][1][0][RTW89_MKK][11] = 78, @@ -32562,18 +32674,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][11] = 60, [0][0][1][0][RTW89_CHILE][11] = 62, [0][0][1][0][RTW89_QATAR][11] = 60, + [0][0][1][0][RTW89_THAILAND][11] = 60, [0][0][1][0][RTW89_FCC][12] = 60, [0][0][1][0][RTW89_ETSI][12] = 60, [0][0][1][0][RTW89_MKK][12] = 78, [0][0][1][0][RTW89_IC][12] = 60, [0][0][1][0][RTW89_KCC][12] = 70, [0][0][1][0][RTW89_ACMA][12] = 60, - [0][0][1][0][RTW89_CN][12] = 58, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 60, [0][0][1][0][RTW89_MEXICO][12] = 60, [0][0][1][0][RTW89_UKRAINE][12] = 60, [0][0][1][0][RTW89_CHILE][12] = 60, [0][0][1][0][RTW89_QATAR][12] = 60, + [0][0][1][0][RTW89_THAILAND][12] = 60, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, [0][0][1][0][RTW89_MKK][13] = 127, @@ -32586,6 +32700,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][13] = 127, [0][0][1][0][RTW89_CHILE][13] = 127, [0][0][1][0][RTW89_QATAR][13] = 127, + [0][0][1][0][RTW89_THAILAND][13] = 127, [0][1][1][0][RTW89_FCC][0] = 66, [0][1][1][0][RTW89_ETSI][0] = 48, [0][1][1][0][RTW89_MKK][0] = 66, @@ -32598,6 +32713,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][0] = 48, [0][1][1][0][RTW89_CHILE][0] = 66, [0][1][1][0][RTW89_QATAR][0] = 48, + [0][1][1][0][RTW89_THAILAND][0] = 48, [0][1][1][0][RTW89_FCC][1] = 68, [0][1][1][0][RTW89_ETSI][1] = 48, [0][1][1][0][RTW89_MKK][1] = 66, @@ -32610,6 +32726,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][1] = 48, [0][1][1][0][RTW89_CHILE][1] = 68, [0][1][1][0][RTW89_QATAR][1] = 48, + [0][1][1][0][RTW89_THAILAND][1] = 48, [0][1][1][0][RTW89_FCC][2] = 72, [0][1][1][0][RTW89_ETSI][2] = 48, [0][1][1][0][RTW89_MKK][2] = 66, @@ -32622,6 +32739,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][2] = 48, [0][1][1][0][RTW89_CHILE][2] = 54, [0][1][1][0][RTW89_QATAR][2] = 48, + [0][1][1][0][RTW89_THAILAND][2] = 48, [0][1][1][0][RTW89_FCC][3] = 76, [0][1][1][0][RTW89_ETSI][3] = 48, [0][1][1][0][RTW89_MKK][3] = 66, @@ -32634,6 +32752,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][3] = 48, [0][1][1][0][RTW89_CHILE][3] = 
54, [0][1][1][0][RTW89_QATAR][3] = 48, + [0][1][1][0][RTW89_THAILAND][3] = 48, [0][1][1][0][RTW89_FCC][4] = 80, [0][1][1][0][RTW89_ETSI][4] = 48, [0][1][1][0][RTW89_MKK][4] = 66, @@ -32646,6 +32765,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][4] = 48, [0][1][1][0][RTW89_CHILE][4] = 54, [0][1][1][0][RTW89_QATAR][4] = 48, + [0][1][1][0][RTW89_THAILAND][4] = 48, [0][1][1][0][RTW89_FCC][5] = 80, [0][1][1][0][RTW89_ETSI][5] = 48, [0][1][1][0][RTW89_MKK][5] = 66, @@ -32658,6 +32778,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][5] = 48, [0][1][1][0][RTW89_CHILE][5] = 80, [0][1][1][0][RTW89_QATAR][5] = 48, + [0][1][1][0][RTW89_THAILAND][5] = 48, [0][1][1][0][RTW89_FCC][6] = 80, [0][1][1][0][RTW89_ETSI][6] = 48, [0][1][1][0][RTW89_MKK][6] = 66, @@ -32670,6 +32791,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][6] = 48, [0][1][1][0][RTW89_CHILE][6] = 56, [0][1][1][0][RTW89_QATAR][6] = 48, + [0][1][1][0][RTW89_THAILAND][6] = 48, [0][1][1][0][RTW89_FCC][7] = 78, [0][1][1][0][RTW89_ETSI][7] = 48, [0][1][1][0][RTW89_MKK][7] = 66, @@ -32682,6 +32804,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][7] = 48, [0][1][1][0][RTW89_CHILE][7] = 56, [0][1][1][0][RTW89_QATAR][7] = 48, + [0][1][1][0][RTW89_THAILAND][7] = 48, [0][1][1][0][RTW89_FCC][8] = 74, [0][1][1][0][RTW89_ETSI][8] = 48, [0][1][1][0][RTW89_MKK][8] = 66, @@ -32694,6 +32817,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][8] = 48, [0][1][1][0][RTW89_CHILE][8] = 56, [0][1][1][0][RTW89_QATAR][8] = 48, + [0][1][1][0][RTW89_THAILAND][8] = 48, [0][1][1][0][RTW89_FCC][9] = 70, [0][1][1][0][RTW89_ETSI][9] = 48, [0][1][1][0][RTW89_MKK][9] = 66, @@ -32706,6 +32830,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][9] = 48, [0][1][1][0][RTW89_CHILE][9] = 70, [0][1][1][0][RTW89_QATAR][9] = 48, + [0][1][1][0][RTW89_THAILAND][9] = 48, [0][1][1][0][RTW89_FCC][10] = 62, [0][1][1][0][RTW89_ETSI][10] = 48, [0][1][1][0][RTW89_MKK][10] = 66, @@ -32718,6 +32843,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][10] = 48, [0][1][1][0][RTW89_CHILE][10] = 62, [0][1][1][0][RTW89_QATAR][10] = 48, + [0][1][1][0][RTW89_THAILAND][10] = 48, [0][1][1][0][RTW89_FCC][11] = 60, [0][1][1][0][RTW89_ETSI][11] = 48, [0][1][1][0][RTW89_MKK][11] = 66, @@ -32730,18 +32856,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][11] = 48, [0][1][1][0][RTW89_CHILE][11] = 60, [0][1][1][0][RTW89_QATAR][11] = 48, + [0][1][1][0][RTW89_THAILAND][11] = 48, [0][1][1][0][RTW89_FCC][12] = 36, [0][1][1][0][RTW89_ETSI][12] = 48, [0][1][1][0][RTW89_MKK][12] = 66, [0][1][1][0][RTW89_IC][12] = 36, [0][1][1][0][RTW89_KCC][12] = 64, [0][1][1][0][RTW89_ACMA][12] = 48, - [0][1][1][0][RTW89_CN][12] = 46, + [0][1][1][0][RTW89_CN][12] = 40, [0][1][1][0][RTW89_UK][12] = 48, [0][1][1][0][RTW89_MEXICO][12] = 36, [0][1][1][0][RTW89_UKRAINE][12] = 48, [0][1][1][0][RTW89_CHILE][12] = 36, [0][1][1][0][RTW89_QATAR][12] = 48, + [0][1][1][0][RTW89_THAILAND][12] = 48, [0][1][1][0][RTW89_FCC][13] = 127, [0][1][1][0][RTW89_ETSI][13] = 127, [0][1][1][0][RTW89_MKK][13] = 127, @@ -32754,6 +32882,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][13] = 127, [0][1][1][0][RTW89_CHILE][13] = 
127, [0][1][1][0][RTW89_QATAR][13] = 127, + [0][1][1][0][RTW89_THAILAND][13] = 127, [0][0][2][0][RTW89_FCC][0] = 66, [0][0][2][0][RTW89_ETSI][0] = 60, [0][0][2][0][RTW89_MKK][0] = 78, @@ -32766,6 +32895,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][0] = 60, [0][0][2][0][RTW89_CHILE][0] = 66, [0][0][2][0][RTW89_QATAR][0] = 60, + [0][0][2][0][RTW89_THAILAND][0] = 60, [0][0][2][0][RTW89_FCC][1] = 70, [0][0][2][0][RTW89_ETSI][1] = 60, [0][0][2][0][RTW89_MKK][1] = 78, @@ -32778,6 +32908,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][1] = 60, [0][0][2][0][RTW89_CHILE][1] = 70, [0][0][2][0][RTW89_QATAR][1] = 60, + [0][0][2][0][RTW89_THAILAND][1] = 60, [0][0][2][0][RTW89_FCC][2] = 74, [0][0][2][0][RTW89_ETSI][2] = 60, [0][0][2][0][RTW89_MKK][2] = 78, @@ -32790,6 +32921,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][2] = 60, [0][0][2][0][RTW89_CHILE][2] = 64, [0][0][2][0][RTW89_QATAR][2] = 60, + [0][0][2][0][RTW89_THAILAND][2] = 60, [0][0][2][0][RTW89_FCC][3] = 78, [0][0][2][0][RTW89_ETSI][3] = 60, [0][0][2][0][RTW89_MKK][3] = 78, @@ -32802,6 +32934,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][3] = 60, [0][0][2][0][RTW89_CHILE][3] = 64, [0][0][2][0][RTW89_QATAR][3] = 60, + [0][0][2][0][RTW89_THAILAND][3] = 60, [0][0][2][0][RTW89_FCC][4] = 80, [0][0][2][0][RTW89_ETSI][4] = 60, [0][0][2][0][RTW89_MKK][4] = 78, @@ -32814,6 +32947,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][4] = 60, [0][0][2][0][RTW89_CHILE][4] = 64, [0][0][2][0][RTW89_QATAR][4] = 60, + [0][0][2][0][RTW89_THAILAND][4] = 60, [0][0][2][0][RTW89_FCC][5] = 80, [0][0][2][0][RTW89_ETSI][5] = 60, [0][0][2][0][RTW89_MKK][5] = 78, @@ -32826,6 +32960,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][5] = 60, [0][0][2][0][RTW89_CHILE][5] = 80, [0][0][2][0][RTW89_QATAR][5] = 60, + [0][0][2][0][RTW89_THAILAND][5] = 60, [0][0][2][0][RTW89_FCC][6] = 80, [0][0][2][0][RTW89_ETSI][6] = 60, [0][0][2][0][RTW89_MKK][6] = 78, @@ -32838,6 +32973,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][6] = 60, [0][0][2][0][RTW89_CHILE][6] = 68, [0][0][2][0][RTW89_QATAR][6] = 60, + [0][0][2][0][RTW89_THAILAND][6] = 60, [0][0][2][0][RTW89_FCC][7] = 80, [0][0][2][0][RTW89_ETSI][7] = 60, [0][0][2][0][RTW89_MKK][7] = 78, @@ -32850,6 +32986,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][7] = 60, [0][0][2][0][RTW89_CHILE][7] = 68, [0][0][2][0][RTW89_QATAR][7] = 60, + [0][0][2][0][RTW89_THAILAND][7] = 60, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 60, [0][0][2][0][RTW89_MKK][8] = 78, @@ -32862,6 +32999,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][8] = 60, [0][0][2][0][RTW89_CHILE][8] = 68, [0][0][2][0][RTW89_QATAR][8] = 60, + [0][0][2][0][RTW89_THAILAND][8] = 60, [0][0][2][0][RTW89_FCC][9] = 74, [0][0][2][0][RTW89_ETSI][9] = 60, [0][0][2][0][RTW89_MKK][9] = 78, @@ -32874,6 +33012,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][9] = 60, [0][0][2][0][RTW89_CHILE][9] = 74, [0][0][2][0][RTW89_QATAR][9] = 60, + [0][0][2][0][RTW89_THAILAND][9] = 60, [0][0][2][0][RTW89_FCC][10] = 62, [0][0][2][0][RTW89_ETSI][10] = 60, [0][0][2][0][RTW89_MKK][10] = 
78, @@ -32886,6 +33025,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][10] = 60, [0][0][2][0][RTW89_CHILE][10] = 62, [0][0][2][0][RTW89_QATAR][10] = 60, + [0][0][2][0][RTW89_THAILAND][10] = 60, [0][0][2][0][RTW89_FCC][11] = 60, [0][0][2][0][RTW89_ETSI][11] = 60, [0][0][2][0][RTW89_MKK][11] = 78, @@ -32898,18 +33038,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][11] = 60, [0][0][2][0][RTW89_CHILE][11] = 60, [0][0][2][0][RTW89_QATAR][11] = 60, + [0][0][2][0][RTW89_THAILAND][11] = 60, [0][0][2][0][RTW89_FCC][12] = 38, [0][0][2][0][RTW89_ETSI][12] = 60, [0][0][2][0][RTW89_MKK][12] = 78, [0][0][2][0][RTW89_IC][12] = 38, [0][0][2][0][RTW89_KCC][12] = 66, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 58, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_MEXICO][12] = 38, [0][0][2][0][RTW89_UKRAINE][12] = 60, [0][0][2][0][RTW89_CHILE][12] = 38, [0][0][2][0][RTW89_QATAR][12] = 60, + [0][0][2][0][RTW89_THAILAND][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, [0][0][2][0][RTW89_MKK][13] = 127, @@ -32922,6 +33064,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][13] = 127, [0][0][2][0][RTW89_CHILE][13] = 127, [0][0][2][0][RTW89_QATAR][13] = 127, + [0][0][2][0][RTW89_THAILAND][13] = 127, [0][1][2][0][RTW89_FCC][0] = 64, [0][1][2][0][RTW89_ETSI][0] = 48, [0][1][2][0][RTW89_MKK][0] = 68, @@ -32934,6 +33077,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][0] = 48, [0][1][2][0][RTW89_CHILE][0] = 64, [0][1][2][0][RTW89_QATAR][0] = 48, + [0][1][2][0][RTW89_THAILAND][0] = 48, [0][1][2][0][RTW89_FCC][1] = 70, [0][1][2][0][RTW89_ETSI][1] = 48, [0][1][2][0][RTW89_MKK][1] = 68, @@ -32946,6 +33090,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][1] = 48, [0][1][2][0][RTW89_CHILE][1] = 70, [0][1][2][0][RTW89_QATAR][1] = 48, + [0][1][2][0][RTW89_THAILAND][1] = 48, [0][1][2][0][RTW89_FCC][2] = 74, [0][1][2][0][RTW89_ETSI][2] = 48, [0][1][2][0][RTW89_MKK][2] = 68, @@ -32958,6 +33103,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][2] = 48, [0][1][2][0][RTW89_CHILE][2] = 56, [0][1][2][0][RTW89_QATAR][2] = 48, + [0][1][2][0][RTW89_THAILAND][2] = 48, [0][1][2][0][RTW89_FCC][3] = 78, [0][1][2][0][RTW89_ETSI][3] = 48, [0][1][2][0][RTW89_MKK][3] = 68, @@ -32970,6 +33116,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][3] = 48, [0][1][2][0][RTW89_CHILE][3] = 56, [0][1][2][0][RTW89_QATAR][3] = 48, + [0][1][2][0][RTW89_THAILAND][3] = 48, [0][1][2][0][RTW89_FCC][4] = 80, [0][1][2][0][RTW89_ETSI][4] = 48, [0][1][2][0][RTW89_MKK][4] = 68, @@ -32982,6 +33129,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][4] = 48, [0][1][2][0][RTW89_CHILE][4] = 56, [0][1][2][0][RTW89_QATAR][4] = 48, + [0][1][2][0][RTW89_THAILAND][4] = 48, [0][1][2][0][RTW89_FCC][5] = 80, [0][1][2][0][RTW89_ETSI][5] = 48, [0][1][2][0][RTW89_MKK][5] = 68, @@ -32994,6 +33142,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][5] = 48, [0][1][2][0][RTW89_CHILE][5] = 78, [0][1][2][0][RTW89_QATAR][5] = 48, + [0][1][2][0][RTW89_THAILAND][5] = 48, [0][1][2][0][RTW89_FCC][6] = 80, [0][1][2][0][RTW89_ETSI][6] = 48, [0][1][2][0][RTW89_MKK][6] 
= 68, @@ -33006,6 +33155,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][6] = 48, [0][1][2][0][RTW89_CHILE][6] = 54, [0][1][2][0][RTW89_QATAR][6] = 48, + [0][1][2][0][RTW89_THAILAND][6] = 48, [0][1][2][0][RTW89_FCC][7] = 74, [0][1][2][0][RTW89_ETSI][7] = 48, [0][1][2][0][RTW89_MKK][7] = 68, @@ -33018,6 +33168,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][7] = 48, [0][1][2][0][RTW89_CHILE][7] = 54, [0][1][2][0][RTW89_QATAR][7] = 48, + [0][1][2][0][RTW89_THAILAND][7] = 48, [0][1][2][0][RTW89_FCC][8] = 70, [0][1][2][0][RTW89_ETSI][8] = 48, [0][1][2][0][RTW89_MKK][8] = 68, @@ -33030,6 +33181,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][8] = 48, [0][1][2][0][RTW89_CHILE][8] = 54, [0][1][2][0][RTW89_QATAR][8] = 48, + [0][1][2][0][RTW89_THAILAND][8] = 48, [0][1][2][0][RTW89_FCC][9] = 66, [0][1][2][0][RTW89_ETSI][9] = 48, [0][1][2][0][RTW89_MKK][9] = 68, @@ -33042,6 +33194,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][9] = 48, [0][1][2][0][RTW89_CHILE][9] = 66, [0][1][2][0][RTW89_QATAR][9] = 48, + [0][1][2][0][RTW89_THAILAND][9] = 48, [0][1][2][0][RTW89_FCC][10] = 58, [0][1][2][0][RTW89_ETSI][10] = 48, [0][1][2][0][RTW89_MKK][10] = 68, @@ -33054,6 +33207,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][10] = 48, [0][1][2][0][RTW89_CHILE][10] = 58, [0][1][2][0][RTW89_QATAR][10] = 48, + [0][1][2][0][RTW89_THAILAND][10] = 48, [0][1][2][0][RTW89_FCC][11] = 58, [0][1][2][0][RTW89_ETSI][11] = 48, [0][1][2][0][RTW89_MKK][11] = 68, @@ -33066,18 +33220,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][11] = 48, [0][1][2][0][RTW89_CHILE][11] = 58, [0][1][2][0][RTW89_QATAR][11] = 48, + [0][1][2][0][RTW89_THAILAND][11] = 48, [0][1][2][0][RTW89_FCC][12] = 16, [0][1][2][0][RTW89_ETSI][12] = 48, [0][1][2][0][RTW89_MKK][12] = 68, [0][1][2][0][RTW89_IC][12] = 16, [0][1][2][0][RTW89_KCC][12] = 64, [0][1][2][0][RTW89_ACMA][12] = 48, - [0][1][2][0][RTW89_CN][12] = 46, + [0][1][2][0][RTW89_CN][12] = 38, [0][1][2][0][RTW89_UK][12] = 48, [0][1][2][0][RTW89_MEXICO][12] = 16, [0][1][2][0][RTW89_UKRAINE][12] = 48, [0][1][2][0][RTW89_CHILE][12] = 16, [0][1][2][0][RTW89_QATAR][12] = 48, + [0][1][2][0][RTW89_THAILAND][12] = 48, [0][1][2][0][RTW89_FCC][13] = 127, [0][1][2][0][RTW89_ETSI][13] = 127, [0][1][2][0][RTW89_MKK][13] = 127, @@ -33090,18 +33246,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][13] = 127, [0][1][2][0][RTW89_CHILE][13] = 127, [0][1][2][0][RTW89_QATAR][13] = 127, + [0][1][2][0][RTW89_THAILAND][13] = 127, [0][1][2][1][RTW89_FCC][0] = 64, [0][1][2][1][RTW89_ETSI][0] = 36, [0][1][2][1][RTW89_MKK][0] = 68, [0][1][2][1][RTW89_IC][0] = 64, [0][1][2][1][RTW89_KCC][0] = 66, [0][1][2][1][RTW89_ACMA][0] = 36, - [0][1][2][1][RTW89_CN][0] = 36, + [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_UK][0] = 36, [0][1][2][1][RTW89_MEXICO][0] = 64, [0][1][2][1][RTW89_UKRAINE][0] = 36, [0][1][2][1][RTW89_CHILE][0] = 64, [0][1][2][1][RTW89_QATAR][0] = 36, + [0][1][2][1][RTW89_THAILAND][0] = 36, [0][1][2][1][RTW89_FCC][1] = 70, [0][1][2][1][RTW89_ETSI][1] = 36, [0][1][2][1][RTW89_MKK][1] = 68, @@ -33114,6 +33272,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][1] = 36, [0][1][2][1][RTW89_CHILE][1] = 70, 
[0][1][2][1][RTW89_QATAR][1] = 36, + [0][1][2][1][RTW89_THAILAND][1] = 36, [0][1][2][1][RTW89_FCC][2] = 74, [0][1][2][1][RTW89_ETSI][2] = 36, [0][1][2][1][RTW89_MKK][2] = 68, @@ -33126,6 +33285,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][2] = 36, [0][1][2][1][RTW89_CHILE][2] = 44, [0][1][2][1][RTW89_QATAR][2] = 36, + [0][1][2][1][RTW89_THAILAND][2] = 36, [0][1][2][1][RTW89_FCC][3] = 78, [0][1][2][1][RTW89_ETSI][3] = 36, [0][1][2][1][RTW89_MKK][3] = 68, @@ -33138,6 +33298,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][3] = 36, [0][1][2][1][RTW89_CHILE][3] = 44, [0][1][2][1][RTW89_QATAR][3] = 36, + [0][1][2][1][RTW89_THAILAND][3] = 36, [0][1][2][1][RTW89_FCC][4] = 80, [0][1][2][1][RTW89_ETSI][4] = 36, [0][1][2][1][RTW89_MKK][4] = 68, @@ -33150,6 +33311,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][4] = 36, [0][1][2][1][RTW89_CHILE][4] = 44, [0][1][2][1][RTW89_QATAR][4] = 36, + [0][1][2][1][RTW89_THAILAND][4] = 36, [0][1][2][1][RTW89_FCC][5] = 80, [0][1][2][1][RTW89_ETSI][5] = 36, [0][1][2][1][RTW89_MKK][5] = 68, @@ -33162,6 +33324,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][5] = 36, [0][1][2][1][RTW89_CHILE][5] = 74, [0][1][2][1][RTW89_QATAR][5] = 36, + [0][1][2][1][RTW89_THAILAND][5] = 36, [0][1][2][1][RTW89_FCC][6] = 80, [0][1][2][1][RTW89_ETSI][6] = 36, [0][1][2][1][RTW89_MKK][6] = 68, @@ -33174,6 +33337,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][6] = 36, [0][1][2][1][RTW89_CHILE][6] = 42, [0][1][2][1][RTW89_QATAR][6] = 36, + [0][1][2][1][RTW89_THAILAND][6] = 36, [0][1][2][1][RTW89_FCC][7] = 74, [0][1][2][1][RTW89_ETSI][7] = 36, [0][1][2][1][RTW89_MKK][7] = 68, @@ -33186,6 +33350,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][7] = 36, [0][1][2][1][RTW89_CHILE][7] = 42, [0][1][2][1][RTW89_QATAR][7] = 36, + [0][1][2][1][RTW89_THAILAND][7] = 36, [0][1][2][1][RTW89_FCC][8] = 70, [0][1][2][1][RTW89_ETSI][8] = 36, [0][1][2][1][RTW89_MKK][8] = 68, @@ -33198,6 +33363,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][8] = 36, [0][1][2][1][RTW89_CHILE][8] = 42, [0][1][2][1][RTW89_QATAR][8] = 36, + [0][1][2][1][RTW89_THAILAND][8] = 36, [0][1][2][1][RTW89_FCC][9] = 66, [0][1][2][1][RTW89_ETSI][9] = 36, [0][1][2][1][RTW89_MKK][9] = 68, @@ -33210,6 +33376,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][9] = 36, [0][1][2][1][RTW89_CHILE][9] = 66, [0][1][2][1][RTW89_QATAR][9] = 36, + [0][1][2][1][RTW89_THAILAND][9] = 36, [0][1][2][1][RTW89_FCC][10] = 58, [0][1][2][1][RTW89_ETSI][10] = 36, [0][1][2][1][RTW89_MKK][10] = 68, @@ -33222,6 +33389,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][10] = 36, [0][1][2][1][RTW89_CHILE][10] = 58, [0][1][2][1][RTW89_QATAR][10] = 36, + [0][1][2][1][RTW89_THAILAND][10] = 36, [0][1][2][1][RTW89_FCC][11] = 58, [0][1][2][1][RTW89_ETSI][11] = 36, [0][1][2][1][RTW89_MKK][11] = 68, @@ -33234,18 +33402,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][11] = 36, [0][1][2][1][RTW89_CHILE][11] = 58, [0][1][2][1][RTW89_QATAR][11] = 36, + [0][1][2][1][RTW89_THAILAND][11] = 36, [0][1][2][1][RTW89_FCC][12] = 16, [0][1][2][1][RTW89_ETSI][12] = 36, 
[0][1][2][1][RTW89_MKK][12] = 68, [0][1][2][1][RTW89_IC][12] = 16, [0][1][2][1][RTW89_KCC][12] = 64, [0][1][2][1][RTW89_ACMA][12] = 36, - [0][1][2][1][RTW89_CN][12] = 34, + [0][1][2][1][RTW89_CN][12] = 26, [0][1][2][1][RTW89_UK][12] = 36, [0][1][2][1][RTW89_MEXICO][12] = 16, [0][1][2][1][RTW89_UKRAINE][12] = 36, [0][1][2][1][RTW89_CHILE][12] = 16, [0][1][2][1][RTW89_QATAR][12] = 36, + [0][1][2][1][RTW89_THAILAND][12] = 36, [0][1][2][1][RTW89_FCC][13] = 127, [0][1][2][1][RTW89_ETSI][13] = 127, [0][1][2][1][RTW89_MKK][13] = 127, @@ -33258,6 +33428,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][13] = 127, [0][1][2][1][RTW89_CHILE][13] = 127, [0][1][2][1][RTW89_QATAR][13] = 127, + [0][1][2][1][RTW89_THAILAND][13] = 127, [1][0][2][0][RTW89_FCC][0] = 127, [1][0][2][0][RTW89_ETSI][0] = 127, [1][0][2][0][RTW89_MKK][0] = 127, @@ -33270,6 +33441,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][0] = 127, [1][0][2][0][RTW89_CHILE][0] = 127, [1][0][2][0][RTW89_QATAR][0] = 127, + [1][0][2][0][RTW89_THAILAND][0] = 127, [1][0][2][0][RTW89_FCC][1] = 127, [1][0][2][0][RTW89_ETSI][1] = 127, [1][0][2][0][RTW89_MKK][1] = 127, @@ -33282,6 +33454,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][1] = 127, [1][0][2][0][RTW89_CHILE][1] = 127, [1][0][2][0][RTW89_QATAR][1] = 127, + [1][0][2][0][RTW89_THAILAND][1] = 127, [1][0][2][0][RTW89_FCC][2] = 64, [1][0][2][0][RTW89_ETSI][2] = 60, [1][0][2][0][RTW89_MKK][2] = 74, @@ -33294,6 +33467,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][2] = 60, [1][0][2][0][RTW89_CHILE][2] = 64, [1][0][2][0][RTW89_QATAR][2] = 60, + [1][0][2][0][RTW89_THAILAND][2] = 60, [1][0][2][0][RTW89_FCC][3] = 64, [1][0][2][0][RTW89_ETSI][3] = 60, [1][0][2][0][RTW89_MKK][3] = 74, @@ -33306,6 +33480,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][3] = 60, [1][0][2][0][RTW89_CHILE][3] = 64, [1][0][2][0][RTW89_QATAR][3] = 60, + [1][0][2][0][RTW89_THAILAND][3] = 60, [1][0][2][0][RTW89_FCC][4] = 68, [1][0][2][0][RTW89_ETSI][4] = 60, [1][0][2][0][RTW89_MKK][4] = 74, @@ -33318,6 +33493,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][4] = 60, [1][0][2][0][RTW89_CHILE][4] = 68, [1][0][2][0][RTW89_QATAR][4] = 60, + [1][0][2][0][RTW89_THAILAND][4] = 60, [1][0][2][0][RTW89_FCC][5] = 68, [1][0][2][0][RTW89_ETSI][5] = 60, [1][0][2][0][RTW89_MKK][5] = 74, @@ -33330,6 +33506,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][5] = 60, [1][0][2][0][RTW89_CHILE][5] = 68, [1][0][2][0][RTW89_QATAR][5] = 60, + [1][0][2][0][RTW89_THAILAND][5] = 60, [1][0][2][0][RTW89_FCC][6] = 66, [1][0][2][0][RTW89_ETSI][6] = 60, [1][0][2][0][RTW89_MKK][6] = 74, @@ -33342,6 +33519,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][6] = 60, [1][0][2][0][RTW89_CHILE][6] = 66, [1][0][2][0][RTW89_QATAR][6] = 60, + [1][0][2][0][RTW89_THAILAND][6] = 60, [1][0][2][0][RTW89_FCC][7] = 62, [1][0][2][0][RTW89_ETSI][7] = 60, [1][0][2][0][RTW89_MKK][7] = 74, @@ -33354,6 +33532,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][7] = 60, [1][0][2][0][RTW89_CHILE][7] = 62, [1][0][2][0][RTW89_QATAR][7] = 60, + [1][0][2][0][RTW89_THAILAND][7] = 60, [1][0][2][0][RTW89_FCC][8] = 62, [1][0][2][0][RTW89_ETSI][8] = 60, 
[1][0][2][0][RTW89_MKK][8] = 74, @@ -33366,6 +33545,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][8] = 60, [1][0][2][0][RTW89_CHILE][8] = 62, [1][0][2][0][RTW89_QATAR][8] = 60, + [1][0][2][0][RTW89_THAILAND][8] = 60, [1][0][2][0][RTW89_FCC][9] = 60, [1][0][2][0][RTW89_ETSI][9] = 60, [1][0][2][0][RTW89_MKK][9] = 74, @@ -33378,6 +33558,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][9] = 60, [1][0][2][0][RTW89_CHILE][9] = 60, [1][0][2][0][RTW89_QATAR][9] = 60, + [1][0][2][0][RTW89_THAILAND][9] = 60, [1][0][2][0][RTW89_FCC][10] = 56, [1][0][2][0][RTW89_ETSI][10] = 60, [1][0][2][0][RTW89_MKK][10] = 74, @@ -33390,6 +33571,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][10] = 60, [1][0][2][0][RTW89_CHILE][10] = 56, [1][0][2][0][RTW89_QATAR][10] = 60, + [1][0][2][0][RTW89_THAILAND][10] = 60, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, [1][0][2][0][RTW89_MKK][11] = 127, @@ -33402,6 +33584,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][11] = 127, [1][0][2][0][RTW89_CHILE][11] = 127, [1][0][2][0][RTW89_QATAR][11] = 127, + [1][0][2][0][RTW89_THAILAND][11] = 127, [1][0][2][0][RTW89_FCC][12] = 127, [1][0][2][0][RTW89_ETSI][12] = 127, [1][0][2][0][RTW89_MKK][12] = 127, @@ -33414,6 +33597,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][12] = 127, [1][0][2][0][RTW89_CHILE][12] = 127, [1][0][2][0][RTW89_QATAR][12] = 127, + [1][0][2][0][RTW89_THAILAND][12] = 127, [1][0][2][0][RTW89_FCC][13] = 127, [1][0][2][0][RTW89_ETSI][13] = 127, [1][0][2][0][RTW89_MKK][13] = 127, @@ -33426,6 +33610,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][13] = 127, [1][0][2][0][RTW89_CHILE][13] = 127, [1][0][2][0][RTW89_QATAR][13] = 127, + [1][0][2][0][RTW89_THAILAND][13] = 127, [1][1][2][0][RTW89_FCC][0] = 127, [1][1][2][0][RTW89_ETSI][0] = 127, [1][1][2][0][RTW89_MKK][0] = 127, @@ -33438,6 +33623,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][0] = 127, [1][1][2][0][RTW89_CHILE][0] = 127, [1][1][2][0][RTW89_QATAR][0] = 127, + [1][1][2][0][RTW89_THAILAND][0] = 127, [1][1][2][0][RTW89_FCC][1] = 127, [1][1][2][0][RTW89_ETSI][1] = 127, [1][1][2][0][RTW89_MKK][1] = 127, @@ -33450,6 +33636,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][1] = 127, [1][1][2][0][RTW89_CHILE][1] = 127, [1][1][2][0][RTW89_QATAR][1] = 127, + [1][1][2][0][RTW89_THAILAND][1] = 127, [1][1][2][0][RTW89_FCC][2] = 60, [1][1][2][0][RTW89_ETSI][2] = 48, [1][1][2][0][RTW89_MKK][2] = 68, @@ -33462,6 +33649,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][2] = 48, [1][1][2][0][RTW89_CHILE][2] = 60, [1][1][2][0][RTW89_QATAR][2] = 48, + [1][1][2][0][RTW89_THAILAND][2] = 48, [1][1][2][0][RTW89_FCC][3] = 60, [1][1][2][0][RTW89_ETSI][3] = 48, [1][1][2][0][RTW89_MKK][3] = 68, @@ -33474,6 +33662,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][3] = 48, [1][1][2][0][RTW89_CHILE][3] = 56, [1][1][2][0][RTW89_QATAR][3] = 48, + [1][1][2][0][RTW89_THAILAND][3] = 48, [1][1][2][0][RTW89_FCC][4] = 60, [1][1][2][0][RTW89_ETSI][4] = 48, [1][1][2][0][RTW89_MKK][4] = 68, @@ -33486,6 +33675,7 @@ const s8 
rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][4] = 48, [1][1][2][0][RTW89_CHILE][4] = 56, [1][1][2][0][RTW89_QATAR][4] = 48, + [1][1][2][0][RTW89_THAILAND][4] = 48, [1][1][2][0][RTW89_FCC][5] = 60, [1][1][2][0][RTW89_ETSI][5] = 48, [1][1][2][0][RTW89_MKK][5] = 68, @@ -33498,6 +33688,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][5] = 48, [1][1][2][0][RTW89_CHILE][5] = 60, [1][1][2][0][RTW89_QATAR][5] = 48, + [1][1][2][0][RTW89_THAILAND][5] = 48, [1][1][2][0][RTW89_FCC][6] = 58, [1][1][2][0][RTW89_ETSI][6] = 48, [1][1][2][0][RTW89_MKK][6] = 68, @@ -33510,6 +33701,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][6] = 48, [1][1][2][0][RTW89_CHILE][6] = 52, [1][1][2][0][RTW89_QATAR][6] = 48, + [1][1][2][0][RTW89_THAILAND][6] = 48, [1][1][2][0][RTW89_FCC][7] = 54, [1][1][2][0][RTW89_ETSI][7] = 48, [1][1][2][0][RTW89_MKK][7] = 68, @@ -33522,6 +33714,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][7] = 48, [1][1][2][0][RTW89_CHILE][7] = 52, [1][1][2][0][RTW89_QATAR][7] = 48, + [1][1][2][0][RTW89_THAILAND][7] = 48, [1][1][2][0][RTW89_FCC][8] = 54, [1][1][2][0][RTW89_ETSI][8] = 48, [1][1][2][0][RTW89_MKK][8] = 68, @@ -33534,6 +33727,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][8] = 48, [1][1][2][0][RTW89_CHILE][8] = 54, [1][1][2][0][RTW89_QATAR][8] = 48, + [1][1][2][0][RTW89_THAILAND][8] = 48, [1][1][2][0][RTW89_FCC][9] = 54, [1][1][2][0][RTW89_ETSI][9] = 48, [1][1][2][0][RTW89_MKK][9] = 68, @@ -33546,6 +33740,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][9] = 48, [1][1][2][0][RTW89_CHILE][9] = 54, [1][1][2][0][RTW89_QATAR][9] = 48, + [1][1][2][0][RTW89_THAILAND][9] = 48, [1][1][2][0][RTW89_FCC][10] = 46, [1][1][2][0][RTW89_ETSI][10] = 48, [1][1][2][0][RTW89_MKK][10] = 68, @@ -33558,6 +33753,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][10] = 48, [1][1][2][0][RTW89_CHILE][10] = 46, [1][1][2][0][RTW89_QATAR][10] = 48, + [1][1][2][0][RTW89_THAILAND][10] = 48, [1][1][2][0][RTW89_FCC][11] = 127, [1][1][2][0][RTW89_ETSI][11] = 127, [1][1][2][0][RTW89_MKK][11] = 127, @@ -33570,6 +33766,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][11] = 127, [1][1][2][0][RTW89_CHILE][11] = 127, [1][1][2][0][RTW89_QATAR][11] = 127, + [1][1][2][0][RTW89_THAILAND][11] = 127, [1][1][2][0][RTW89_FCC][12] = 127, [1][1][2][0][RTW89_ETSI][12] = 127, [1][1][2][0][RTW89_MKK][12] = 127, @@ -33582,6 +33779,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][12] = 127, [1][1][2][0][RTW89_CHILE][12] = 127, [1][1][2][0][RTW89_QATAR][12] = 127, + [1][1][2][0][RTW89_THAILAND][12] = 127, [1][1][2][0][RTW89_FCC][13] = 127, [1][1][2][0][RTW89_ETSI][13] = 127, [1][1][2][0][RTW89_MKK][13] = 127, @@ -33594,6 +33792,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][13] = 127, [1][1][2][0][RTW89_CHILE][13] = 127, [1][1][2][0][RTW89_QATAR][13] = 127, + [1][1][2][0][RTW89_THAILAND][13] = 127, [1][1][2][1][RTW89_FCC][0] = 127, [1][1][2][1][RTW89_ETSI][0] = 127, [1][1][2][1][RTW89_MKK][0] = 127, @@ -33606,6 +33805,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][0] = 127, [1][1][2][1][RTW89_CHILE][0] = 127, 
[1][1][2][1][RTW89_QATAR][0] = 127, + [1][1][2][1][RTW89_THAILAND][0] = 127, [1][1][2][1][RTW89_FCC][1] = 127, [1][1][2][1][RTW89_ETSI][1] = 127, [1][1][2][1][RTW89_MKK][1] = 127, @@ -33618,6 +33818,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][1] = 127, [1][1][2][1][RTW89_CHILE][1] = 127, [1][1][2][1][RTW89_QATAR][1] = 127, + [1][1][2][1][RTW89_THAILAND][1] = 127, [1][1][2][1][RTW89_FCC][2] = 60, [1][1][2][1][RTW89_ETSI][2] = 36, [1][1][2][1][RTW89_MKK][2] = 68, @@ -33630,6 +33831,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][2] = 36, [1][1][2][1][RTW89_CHILE][2] = 60, [1][1][2][1][RTW89_QATAR][2] = 36, + [1][1][2][1][RTW89_THAILAND][2] = 36, [1][1][2][1][RTW89_FCC][3] = 60, [1][1][2][1][RTW89_ETSI][3] = 36, [1][1][2][1][RTW89_MKK][3] = 68, @@ -33642,6 +33844,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][3] = 36, [1][1][2][1][RTW89_CHILE][3] = 44, [1][1][2][1][RTW89_QATAR][3] = 36, + [1][1][2][1][RTW89_THAILAND][3] = 36, [1][1][2][1][RTW89_FCC][4] = 60, [1][1][2][1][RTW89_ETSI][4] = 36, [1][1][2][1][RTW89_MKK][4] = 68, @@ -33654,6 +33857,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][4] = 36, [1][1][2][1][RTW89_CHILE][4] = 44, [1][1][2][1][RTW89_QATAR][4] = 36, + [1][1][2][1][RTW89_THAILAND][4] = 36, [1][1][2][1][RTW89_FCC][5] = 60, [1][1][2][1][RTW89_ETSI][5] = 36, [1][1][2][1][RTW89_MKK][5] = 68, @@ -33666,6 +33870,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][5] = 36, [1][1][2][1][RTW89_CHILE][5] = 60, [1][1][2][1][RTW89_QATAR][5] = 36, + [1][1][2][1][RTW89_THAILAND][5] = 36, [1][1][2][1][RTW89_FCC][6] = 58, [1][1][2][1][RTW89_ETSI][6] = 36, [1][1][2][1][RTW89_MKK][6] = 68, @@ -33678,6 +33883,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][6] = 36, [1][1][2][1][RTW89_CHILE][6] = 40, [1][1][2][1][RTW89_QATAR][6] = 36, + [1][1][2][1][RTW89_THAILAND][6] = 36, [1][1][2][1][RTW89_FCC][7] = 54, [1][1][2][1][RTW89_ETSI][7] = 36, [1][1][2][1][RTW89_MKK][7] = 68, @@ -33690,6 +33896,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][7] = 36, [1][1][2][1][RTW89_CHILE][7] = 40, [1][1][2][1][RTW89_QATAR][7] = 36, + [1][1][2][1][RTW89_THAILAND][7] = 36, [1][1][2][1][RTW89_FCC][8] = 54, [1][1][2][1][RTW89_ETSI][8] = 36, [1][1][2][1][RTW89_MKK][8] = 68, @@ -33702,6 +33909,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][8] = 36, [1][1][2][1][RTW89_CHILE][8] = 54, [1][1][2][1][RTW89_QATAR][8] = 36, + [1][1][2][1][RTW89_THAILAND][8] = 36, [1][1][2][1][RTW89_FCC][9] = 54, [1][1][2][1][RTW89_ETSI][9] = 36, [1][1][2][1][RTW89_MKK][9] = 68, @@ -33714,18 +33922,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][9] = 36, [1][1][2][1][RTW89_CHILE][9] = 54, [1][1][2][1][RTW89_QATAR][9] = 36, + [1][1][2][1][RTW89_THAILAND][9] = 36, [1][1][2][1][RTW89_FCC][10] = 46, [1][1][2][1][RTW89_ETSI][10] = 36, [1][1][2][1][RTW89_MKK][10] = 68, [1][1][2][1][RTW89_IC][10] = 46, [1][1][2][1][RTW89_KCC][10] = 64, [1][1][2][1][RTW89_ACMA][10] = 36, - [1][1][2][1][RTW89_CN][10] = 36, + [1][1][2][1][RTW89_CN][10] = 34, [1][1][2][1][RTW89_UK][10] = 36, [1][1][2][1][RTW89_MEXICO][10] = 46, [1][1][2][1][RTW89_UKRAINE][10] = 36, [1][1][2][1][RTW89_CHILE][10] = 46, 
[1][1][2][1][RTW89_QATAR][10] = 36, + [1][1][2][1][RTW89_THAILAND][10] = 36, [1][1][2][1][RTW89_FCC][11] = 127, [1][1][2][1][RTW89_ETSI][11] = 127, [1][1][2][1][RTW89_MKK][11] = 127, @@ -33738,6 +33948,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][11] = 127, [1][1][2][1][RTW89_CHILE][11] = 127, [1][1][2][1][RTW89_QATAR][11] = 127, + [1][1][2][1][RTW89_THAILAND][11] = 127, [1][1][2][1][RTW89_FCC][12] = 127, [1][1][2][1][RTW89_ETSI][12] = 127, [1][1][2][1][RTW89_MKK][12] = 127, @@ -33750,6 +33961,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][12] = 127, [1][1][2][1][RTW89_CHILE][12] = 127, [1][1][2][1][RTW89_QATAR][12] = 127, + [1][1][2][1][RTW89_THAILAND][12] = 127, [1][1][2][1][RTW89_FCC][13] = 127, [1][1][2][1][RTW89_ETSI][13] = 127, [1][1][2][1][RTW89_MKK][13] = 127, @@ -33762,6 +33974,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][13] = 127, [1][1][2][1][RTW89_CHILE][13] = 127, [1][1][2][1][RTW89_QATAR][13] = 127, + [1][1][2][1][RTW89_THAILAND][13] = 127, }; static @@ -33992,6 +34205,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][0] = 54, [0][0][1][0][RTW89_CHILE][0] = 70, [0][0][1][0][RTW89_QATAR][0] = 66, + [0][0][1][0][RTW89_THAILAND][0] = 66, [0][0][1][0][RTW89_FCC][2] = 72, [0][0][1][0][RTW89_ETSI][2] = 66, [0][0][1][0][RTW89_MKK][2] = 66, @@ -34004,6 +34218,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][2] = 54, [0][0][1][0][RTW89_CHILE][2] = 70, [0][0][1][0][RTW89_QATAR][2] = 66, + [0][0][1][0][RTW89_THAILAND][2] = 66, [0][0][1][0][RTW89_FCC][4] = 72, [0][0][1][0][RTW89_ETSI][4] = 66, [0][0][1][0][RTW89_MKK][4] = 66, @@ -34016,6 +34231,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][4] = 54, [0][0][1][0][RTW89_CHILE][4] = 70, [0][0][1][0][RTW89_QATAR][4] = 66, + [0][0][1][0][RTW89_THAILAND][4] = 66, [0][0][1][0][RTW89_FCC][6] = 72, [0][0][1][0][RTW89_ETSI][6] = 66, [0][0][1][0][RTW89_MKK][6] = 66, @@ -34028,6 +34244,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][6] = 54, [0][0][1][0][RTW89_CHILE][6] = 70, [0][0][1][0][RTW89_QATAR][6] = 66, + [0][0][1][0][RTW89_THAILAND][6] = 66, [0][0][1][0][RTW89_FCC][8] = 72, [0][0][1][0][RTW89_ETSI][8] = 66, [0][0][1][0][RTW89_MKK][8] = 66, @@ -34040,6 +34257,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][8] = 54, [0][0][1][0][RTW89_CHILE][8] = 70, [0][0][1][0][RTW89_QATAR][8] = 66, + [0][0][1][0][RTW89_THAILAND][8] = 66, [0][0][1][0][RTW89_FCC][10] = 72, [0][0][1][0][RTW89_ETSI][10] = 66, [0][0][1][0][RTW89_MKK][10] = 66, @@ -34052,6 +34270,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][10] = 54, [0][0][1][0][RTW89_CHILE][10] = 70, [0][0][1][0][RTW89_QATAR][10] = 66, + [0][0][1][0][RTW89_THAILAND][10] = 66, [0][0][1][0][RTW89_FCC][12] = 72, [0][0][1][0][RTW89_ETSI][12] = 66, [0][0][1][0][RTW89_MKK][12] = 66, @@ -34064,6 +34283,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][12] = 54, [0][0][1][0][RTW89_CHILE][12] = 70, [0][0][1][0][RTW89_QATAR][12] = 66, + [0][0][1][0][RTW89_THAILAND][12] = 66, [0][0][1][0][RTW89_FCC][14] = 70, [0][0][1][0][RTW89_ETSI][14] = 66, [0][0][1][0][RTW89_MKK][14] = 66, @@ -34076,6 +34296,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][14] = 54, [0][0][1][0][RTW89_CHILE][14] = 68, [0][0][1][0][RTW89_QATAR][14] = 66, + [0][0][1][0][RTW89_THAILAND][14] = 66, [0][0][1][0][RTW89_FCC][15] = 72, [0][0][1][0][RTW89_ETSI][15] = 66, [0][0][1][0][RTW89_MKK][15] = 70, @@ -34088,6 +34309,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][15] = 54, [0][0][1][0][RTW89_CHILE][15] = 70, [0][0][1][0][RTW89_QATAR][15] = 66, + [0][0][1][0][RTW89_THAILAND][15] = 66, [0][0][1][0][RTW89_FCC][17] = 72, [0][0][1][0][RTW89_ETSI][17] = 66, [0][0][1][0][RTW89_MKK][17] = 70, @@ -34100,6 +34322,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][17] = 54, [0][0][1][0][RTW89_CHILE][17] = 70, [0][0][1][0][RTW89_QATAR][17] = 66, + [0][0][1][0][RTW89_THAILAND][17] = 66, [0][0][1][0][RTW89_FCC][19] = 72, [0][0][1][0][RTW89_ETSI][19] = 66, [0][0][1][0][RTW89_MKK][19] = 70, @@ -34112,6 +34335,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][19] = 54, [0][0][1][0][RTW89_CHILE][19] = 70, [0][0][1][0][RTW89_QATAR][19] = 66, + [0][0][1][0][RTW89_THAILAND][19] = 66, [0][0][1][0][RTW89_FCC][21] = 72, [0][0][1][0][RTW89_ETSI][21] = 66, [0][0][1][0][RTW89_MKK][21] = 70, @@ -34124,6 +34348,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][21] = 54, [0][0][1][0][RTW89_CHILE][21] = 70, [0][0][1][0][RTW89_QATAR][21] = 66, + [0][0][1][0][RTW89_THAILAND][21] = 66, [0][0][1][0][RTW89_FCC][23] = 72, [0][0][1][0][RTW89_ETSI][23] = 66, [0][0][1][0][RTW89_MKK][23] = 70, @@ -34136,6 +34361,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][23] = 54, [0][0][1][0][RTW89_CHILE][23] = 70, [0][0][1][0][RTW89_QATAR][23] = 66, + [0][0][1][0][RTW89_THAILAND][23] = 66, [0][0][1][0][RTW89_FCC][25] = 72, [0][0][1][0][RTW89_ETSI][25] = 66, [0][0][1][0][RTW89_MKK][25] = 70, @@ -34148,6 +34374,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][25] = 54, [0][0][1][0][RTW89_CHILE][25] = 70, [0][0][1][0][RTW89_QATAR][25] = 66, + [0][0][1][0][RTW89_THAILAND][25] = 66, [0][0][1][0][RTW89_FCC][27] = 72, [0][0][1][0][RTW89_ETSI][27] = 66, [0][0][1][0][RTW89_MKK][27] = 70, @@ -34160,6 +34387,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][27] = 54, [0][0][1][0][RTW89_CHILE][27] = 58, [0][0][1][0][RTW89_QATAR][27] = 66, + [0][0][1][0][RTW89_THAILAND][27] = 66, [0][0][1][0][RTW89_FCC][29] = 72, [0][0][1][0][RTW89_ETSI][29] = 66, [0][0][1][0][RTW89_MKK][29] = 70, @@ -34172,6 +34400,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][29] = 54, [0][0][1][0][RTW89_CHILE][29] = 58, [0][0][1][0][RTW89_QATAR][29] = 66, + [0][0][1][0][RTW89_THAILAND][29] = 66, [0][0][1][0][RTW89_FCC][31] = 72, [0][0][1][0][RTW89_ETSI][31] = 66, [0][0][1][0][RTW89_MKK][31] = 70, @@ -34184,6 +34413,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][31] = 54, [0][0][1][0][RTW89_CHILE][31] = 58, [0][0][1][0][RTW89_QATAR][31] = 66, + [0][0][1][0][RTW89_THAILAND][31] = 66, [0][0][1][0][RTW89_FCC][33] = 72, [0][0][1][0][RTW89_ETSI][33] = 66, [0][0][1][0][RTW89_MKK][33] = 70, @@ -34196,6 +34426,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][33] = 54, 
[0][0][1][0][RTW89_CHILE][33] = 58, [0][0][1][0][RTW89_QATAR][33] = 66, + [0][0][1][0][RTW89_THAILAND][33] = 66, [0][0][1][0][RTW89_FCC][35] = 60, [0][0][1][0][RTW89_ETSI][35] = 66, [0][0][1][0][RTW89_MKK][35] = 70, @@ -34208,6 +34439,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][35] = 54, [0][0][1][0][RTW89_CHILE][35] = 58, [0][0][1][0][RTW89_QATAR][35] = 66, + [0][0][1][0][RTW89_THAILAND][35] = 66, [0][0][1][0][RTW89_FCC][37] = 72, [0][0][1][0][RTW89_ETSI][37] = 127, [0][0][1][0][RTW89_MKK][37] = 70, @@ -34220,66 +34452,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][37] = 127, [0][0][1][0][RTW89_CHILE][37] = 70, [0][0][1][0][RTW89_QATAR][37] = 127, + [0][0][1][0][RTW89_THAILAND][37] = 127, [0][0][1][0][RTW89_FCC][38] = 72, [0][0][1][0][RTW89_ETSI][38] = 30, [0][0][1][0][RTW89_MKK][38] = 127, [0][0][1][0][RTW89_IC][38] = 72, [0][0][1][0][RTW89_KCC][38] = 62, [0][0][1][0][RTW89_ACMA][38] = 70, - [0][0][1][0][RTW89_CN][38] = 68, + [0][0][1][0][RTW89_CN][38] = 54, [0][0][1][0][RTW89_UK][38] = 64, [0][0][1][0][RTW89_MEXICO][38] = 72, [0][0][1][0][RTW89_UKRAINE][38] = 30, [0][0][1][0][RTW89_CHILE][38] = 70, [0][0][1][0][RTW89_QATAR][38] = 30, + [0][0][1][0][RTW89_THAILAND][38] = 30, [0][0][1][0][RTW89_FCC][40] = 72, [0][0][1][0][RTW89_ETSI][40] = 30, [0][0][1][0][RTW89_MKK][40] = 127, [0][0][1][0][RTW89_IC][40] = 72, [0][0][1][0][RTW89_KCC][40] = 62, [0][0][1][0][RTW89_ACMA][40] = 70, - [0][0][1][0][RTW89_CN][40] = 68, + [0][0][1][0][RTW89_CN][40] = 54, [0][0][1][0][RTW89_UK][40] = 64, [0][0][1][0][RTW89_MEXICO][40] = 72, [0][0][1][0][RTW89_UKRAINE][40] = 30, [0][0][1][0][RTW89_CHILE][40] = 70, [0][0][1][0][RTW89_QATAR][40] = 30, + [0][0][1][0][RTW89_THAILAND][40] = 30, [0][0][1][0][RTW89_FCC][42] = 72, [0][0][1][0][RTW89_ETSI][42] = 30, [0][0][1][0][RTW89_MKK][42] = 127, [0][0][1][0][RTW89_IC][42] = 72, [0][0][1][0][RTW89_KCC][42] = 62, [0][0][1][0][RTW89_ACMA][42] = 70, - [0][0][1][0][RTW89_CN][42] = 68, + [0][0][1][0][RTW89_CN][42] = 54, [0][0][1][0][RTW89_UK][42] = 64, [0][0][1][0][RTW89_MEXICO][42] = 72, [0][0][1][0][RTW89_UKRAINE][42] = 30, [0][0][1][0][RTW89_CHILE][42] = 70, [0][0][1][0][RTW89_QATAR][42] = 30, + [0][0][1][0][RTW89_THAILAND][42] = 30, [0][0][1][0][RTW89_FCC][44] = 72, [0][0][1][0][RTW89_ETSI][44] = 30, [0][0][1][0][RTW89_MKK][44] = 127, [0][0][1][0][RTW89_IC][44] = 72, [0][0][1][0][RTW89_KCC][44] = 62, [0][0][1][0][RTW89_ACMA][44] = 70, - [0][0][1][0][RTW89_CN][44] = 68, + [0][0][1][0][RTW89_CN][44] = 54, [0][0][1][0][RTW89_UK][44] = 64, [0][0][1][0][RTW89_MEXICO][44] = 72, [0][0][1][0][RTW89_UKRAINE][44] = 30, [0][0][1][0][RTW89_CHILE][44] = 70, [0][0][1][0][RTW89_QATAR][44] = 30, + [0][0][1][0][RTW89_THAILAND][44] = 30, [0][0][1][0][RTW89_FCC][46] = 72, [0][0][1][0][RTW89_ETSI][46] = 30, [0][0][1][0][RTW89_MKK][46] = 127, [0][0][1][0][RTW89_IC][46] = 72, [0][0][1][0][RTW89_KCC][46] = 62, [0][0][1][0][RTW89_ACMA][46] = 70, - [0][0][1][0][RTW89_CN][46] = 68, + [0][0][1][0][RTW89_CN][46] = 54, [0][0][1][0][RTW89_UK][46] = 64, [0][0][1][0][RTW89_MEXICO][46] = 72, [0][0][1][0][RTW89_UKRAINE][46] = 30, [0][0][1][0][RTW89_CHILE][46] = 70, [0][0][1][0][RTW89_QATAR][46] = 30, + [0][0][1][0][RTW89_THAILAND][46] = 30, [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, @@ -34292,6 +34530,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][48] = 127, 
[0][0][1][0][RTW89_CHILE][48] = 127, [0][0][1][0][RTW89_QATAR][48] = 127, + [0][0][1][0][RTW89_THAILAND][48] = 127, [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, @@ -34304,6 +34543,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][50] = 127, [0][0][1][0][RTW89_CHILE][50] = 127, [0][0][1][0][RTW89_QATAR][50] = 127, + [0][0][1][0][RTW89_THAILAND][50] = 127, [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, @@ -34316,6 +34556,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][52] = 127, [0][0][1][0][RTW89_CHILE][52] = 127, [0][0][1][0][RTW89_QATAR][52] = 127, + [0][0][1][0][RTW89_THAILAND][52] = 127, [0][1][1][0][RTW89_FCC][0] = 60, [0][1][1][0][RTW89_ETSI][0] = 54, [0][1][1][0][RTW89_MKK][0] = 54, @@ -34328,6 +34569,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][0] = 42, [0][1][1][0][RTW89_CHILE][0] = 60, [0][1][1][0][RTW89_QATAR][0] = 54, + [0][1][1][0][RTW89_THAILAND][0] = 54, [0][1][1][0][RTW89_FCC][2] = 60, [0][1][1][0][RTW89_ETSI][2] = 54, [0][1][1][0][RTW89_MKK][2] = 54, @@ -34340,6 +34582,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][2] = 42, [0][1][1][0][RTW89_CHILE][2] = 60, [0][1][1][0][RTW89_QATAR][2] = 54, + [0][1][1][0][RTW89_THAILAND][2] = 54, [0][1][1][0][RTW89_FCC][4] = 60, [0][1][1][0][RTW89_ETSI][4] = 54, [0][1][1][0][RTW89_MKK][4] = 54, @@ -34352,6 +34595,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][4] = 42, [0][1][1][0][RTW89_CHILE][4] = 60, [0][1][1][0][RTW89_QATAR][4] = 54, + [0][1][1][0][RTW89_THAILAND][4] = 54, [0][1][1][0][RTW89_FCC][6] = 60, [0][1][1][0][RTW89_ETSI][6] = 54, [0][1][1][0][RTW89_MKK][6] = 54, @@ -34364,6 +34608,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][6] = 42, [0][1][1][0][RTW89_CHILE][6] = 60, [0][1][1][0][RTW89_QATAR][6] = 54, + [0][1][1][0][RTW89_THAILAND][6] = 54, [0][1][1][0][RTW89_FCC][8] = 62, [0][1][1][0][RTW89_ETSI][8] = 54, [0][1][1][0][RTW89_MKK][8] = 52, @@ -34376,6 +34621,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][8] = 42, [0][1][1][0][RTW89_CHILE][8] = 62, [0][1][1][0][RTW89_QATAR][8] = 54, + [0][1][1][0][RTW89_THAILAND][8] = 54, [0][1][1][0][RTW89_FCC][10] = 62, [0][1][1][0][RTW89_ETSI][10] = 54, [0][1][1][0][RTW89_MKK][10] = 54, @@ -34388,6 +34634,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][10] = 42, [0][1][1][0][RTW89_CHILE][10] = 62, [0][1][1][0][RTW89_QATAR][10] = 54, + [0][1][1][0][RTW89_THAILAND][10] = 54, [0][1][1][0][RTW89_FCC][12] = 62, [0][1][1][0][RTW89_ETSI][12] = 54, [0][1][1][0][RTW89_MKK][12] = 54, @@ -34400,6 +34647,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][12] = 42, [0][1][1][0][RTW89_CHILE][12] = 62, [0][1][1][0][RTW89_QATAR][12] = 54, + [0][1][1][0][RTW89_THAILAND][12] = 54, [0][1][1][0][RTW89_FCC][14] = 60, [0][1][1][0][RTW89_ETSI][14] = 54, [0][1][1][0][RTW89_MKK][14] = 54, @@ -34412,6 +34660,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][14] = 42, [0][1][1][0][RTW89_CHILE][14] = 60, [0][1][1][0][RTW89_QATAR][14] = 54, + [0][1][1][0][RTW89_THAILAND][14] = 54, 
[0][1][1][0][RTW89_FCC][15] = 60, [0][1][1][0][RTW89_ETSI][15] = 54, [0][1][1][0][RTW89_MKK][15] = 70, @@ -34424,6 +34673,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][15] = 42, [0][1][1][0][RTW89_CHILE][15] = 60, [0][1][1][0][RTW89_QATAR][15] = 54, + [0][1][1][0][RTW89_THAILAND][15] = 54, [0][1][1][0][RTW89_FCC][17] = 60, [0][1][1][0][RTW89_ETSI][17] = 54, [0][1][1][0][RTW89_MKK][17] = 70, @@ -34436,6 +34686,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][17] = 42, [0][1][1][0][RTW89_CHILE][17] = 60, [0][1][1][0][RTW89_QATAR][17] = 54, + [0][1][1][0][RTW89_THAILAND][17] = 54, [0][1][1][0][RTW89_FCC][19] = 60, [0][1][1][0][RTW89_ETSI][19] = 54, [0][1][1][0][RTW89_MKK][19] = 70, @@ -34448,6 +34699,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][19] = 42, [0][1][1][0][RTW89_CHILE][19] = 60, [0][1][1][0][RTW89_QATAR][19] = 54, + [0][1][1][0][RTW89_THAILAND][19] = 54, [0][1][1][0][RTW89_FCC][21] = 60, [0][1][1][0][RTW89_ETSI][21] = 54, [0][1][1][0][RTW89_MKK][21] = 70, @@ -34460,6 +34712,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][21] = 42, [0][1][1][0][RTW89_CHILE][21] = 60, [0][1][1][0][RTW89_QATAR][21] = 54, + [0][1][1][0][RTW89_THAILAND][21] = 54, [0][1][1][0][RTW89_FCC][23] = 60, [0][1][1][0][RTW89_ETSI][23] = 54, [0][1][1][0][RTW89_MKK][23] = 70, @@ -34472,6 +34725,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][23] = 42, [0][1][1][0][RTW89_CHILE][23] = 60, [0][1][1][0][RTW89_QATAR][23] = 54, + [0][1][1][0][RTW89_THAILAND][23] = 54, [0][1][1][0][RTW89_FCC][25] = 60, [0][1][1][0][RTW89_ETSI][25] = 54, [0][1][1][0][RTW89_MKK][25] = 70, @@ -34484,6 +34738,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][25] = 42, [0][1][1][0][RTW89_CHILE][25] = 60, [0][1][1][0][RTW89_QATAR][25] = 54, + [0][1][1][0][RTW89_THAILAND][25] = 54, [0][1][1][0][RTW89_FCC][27] = 60, [0][1][1][0][RTW89_ETSI][27] = 54, [0][1][1][0][RTW89_MKK][27] = 70, @@ -34496,6 +34751,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][27] = 42, [0][1][1][0][RTW89_CHILE][27] = 52, [0][1][1][0][RTW89_QATAR][27] = 54, + [0][1][1][0][RTW89_THAILAND][27] = 54, [0][1][1][0][RTW89_FCC][29] = 60, [0][1][1][0][RTW89_ETSI][29] = 54, [0][1][1][0][RTW89_MKK][29] = 70, @@ -34508,6 +34764,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][29] = 42, [0][1][1][0][RTW89_CHILE][29] = 52, [0][1][1][0][RTW89_QATAR][29] = 54, + [0][1][1][0][RTW89_THAILAND][29] = 54, [0][1][1][0][RTW89_FCC][31] = 60, [0][1][1][0][RTW89_ETSI][31] = 54, [0][1][1][0][RTW89_MKK][31] = 70, @@ -34520,6 +34777,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][31] = 42, [0][1][1][0][RTW89_CHILE][31] = 52, [0][1][1][0][RTW89_QATAR][31] = 54, + [0][1][1][0][RTW89_THAILAND][31] = 54, [0][1][1][0][RTW89_FCC][33] = 60, [0][1][1][0][RTW89_ETSI][33] = 54, [0][1][1][0][RTW89_MKK][33] = 70, @@ -34532,6 +34790,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][33] = 42, [0][1][1][0][RTW89_CHILE][33] = 52, [0][1][1][0][RTW89_QATAR][33] = 54, + [0][1][1][0][RTW89_THAILAND][33] = 54, [0][1][1][0][RTW89_FCC][35] = 52, [0][1][1][0][RTW89_ETSI][35] = 54, [0][1][1][0][RTW89_MKK][35] = 70, @@ -34544,6 
+34803,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][35] = 42, [0][1][1][0][RTW89_CHILE][35] = 52, [0][1][1][0][RTW89_QATAR][35] = 54, + [0][1][1][0][RTW89_THAILAND][35] = 54, [0][1][1][0][RTW89_FCC][37] = 62, [0][1][1][0][RTW89_ETSI][37] = 127, [0][1][1][0][RTW89_MKK][37] = 70, @@ -34556,66 +34816,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][37] = 127, [0][1][1][0][RTW89_CHILE][37] = 62, [0][1][1][0][RTW89_QATAR][37] = 127, + [0][1][1][0][RTW89_THAILAND][37] = 127, [0][1][1][0][RTW89_FCC][38] = 72, [0][1][1][0][RTW89_ETSI][38] = 18, [0][1][1][0][RTW89_MKK][38] = 127, [0][1][1][0][RTW89_IC][38] = 72, [0][1][1][0][RTW89_KCC][38] = 60, [0][1][1][0][RTW89_ACMA][38] = 70, - [0][1][1][0][RTW89_CN][38] = 64, + [0][1][1][0][RTW89_CN][38] = 54, [0][1][1][0][RTW89_UK][38] = 52, [0][1][1][0][RTW89_MEXICO][38] = 72, [0][1][1][0][RTW89_UKRAINE][38] = 18, [0][1][1][0][RTW89_CHILE][38] = 70, [0][1][1][0][RTW89_QATAR][38] = 18, + [0][1][1][0][RTW89_THAILAND][38] = 18, [0][1][1][0][RTW89_FCC][40] = 72, [0][1][1][0][RTW89_ETSI][40] = 18, [0][1][1][0][RTW89_MKK][40] = 127, [0][1][1][0][RTW89_IC][40] = 72, [0][1][1][0][RTW89_KCC][40] = 60, [0][1][1][0][RTW89_ACMA][40] = 70, - [0][1][1][0][RTW89_CN][40] = 64, + [0][1][1][0][RTW89_CN][40] = 54, [0][1][1][0][RTW89_UK][40] = 52, [0][1][1][0][RTW89_MEXICO][40] = 72, [0][1][1][0][RTW89_UKRAINE][40] = 18, [0][1][1][0][RTW89_CHILE][40] = 70, [0][1][1][0][RTW89_QATAR][40] = 18, + [0][1][1][0][RTW89_THAILAND][40] = 18, [0][1][1][0][RTW89_FCC][42] = 72, [0][1][1][0][RTW89_ETSI][42] = 18, [0][1][1][0][RTW89_MKK][42] = 127, [0][1][1][0][RTW89_IC][42] = 72, [0][1][1][0][RTW89_KCC][42] = 60, [0][1][1][0][RTW89_ACMA][42] = 70, - [0][1][1][0][RTW89_CN][42] = 64, + [0][1][1][0][RTW89_CN][42] = 54, [0][1][1][0][RTW89_UK][42] = 52, [0][1][1][0][RTW89_MEXICO][42] = 72, [0][1][1][0][RTW89_UKRAINE][42] = 18, [0][1][1][0][RTW89_CHILE][42] = 70, [0][1][1][0][RTW89_QATAR][42] = 18, + [0][1][1][0][RTW89_THAILAND][42] = 18, [0][1][1][0][RTW89_FCC][44] = 72, [0][1][1][0][RTW89_ETSI][44] = 18, [0][1][1][0][RTW89_MKK][44] = 127, [0][1][1][0][RTW89_IC][44] = 72, [0][1][1][0][RTW89_KCC][44] = 60, [0][1][1][0][RTW89_ACMA][44] = 70, - [0][1][1][0][RTW89_CN][44] = 60, + [0][1][1][0][RTW89_CN][44] = 54, [0][1][1][0][RTW89_UK][44] = 52, [0][1][1][0][RTW89_MEXICO][44] = 72, [0][1][1][0][RTW89_UKRAINE][44] = 18, [0][1][1][0][RTW89_CHILE][44] = 70, [0][1][1][0][RTW89_QATAR][44] = 18, + [0][1][1][0][RTW89_THAILAND][44] = 18, [0][1][1][0][RTW89_FCC][46] = 72, [0][1][1][0][RTW89_ETSI][46] = 18, [0][1][1][0][RTW89_MKK][46] = 127, [0][1][1][0][RTW89_IC][46] = 72, [0][1][1][0][RTW89_KCC][46] = 60, [0][1][1][0][RTW89_ACMA][46] = 70, - [0][1][1][0][RTW89_CN][46] = 60, + [0][1][1][0][RTW89_CN][46] = 54, [0][1][1][0][RTW89_UK][46] = 52, [0][1][1][0][RTW89_MEXICO][46] = 72, [0][1][1][0][RTW89_UKRAINE][46] = 18, [0][1][1][0][RTW89_CHILE][46] = 70, [0][1][1][0][RTW89_QATAR][46] = 18, + [0][1][1][0][RTW89_THAILAND][46] = 18, [0][1][1][0][RTW89_FCC][48] = 48, [0][1][1][0][RTW89_ETSI][48] = 127, [0][1][1][0][RTW89_MKK][48] = 127, @@ -34628,6 +34894,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][48] = 127, [0][1][1][0][RTW89_CHILE][48] = 127, [0][1][1][0][RTW89_QATAR][48] = 127, + [0][1][1][0][RTW89_THAILAND][48] = 127, [0][1][1][0][RTW89_FCC][50] = 48, [0][1][1][0][RTW89_ETSI][50] = 127, [0][1][1][0][RTW89_MKK][50] = 127, @@ -34640,6 +34907,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][50] = 127, [0][1][1][0][RTW89_CHILE][50] = 127, [0][1][1][0][RTW89_QATAR][50] = 127, + [0][1][1][0][RTW89_THAILAND][50] = 127, [0][1][1][0][RTW89_FCC][52] = 48, [0][1][1][0][RTW89_ETSI][52] = 127, [0][1][1][0][RTW89_MKK][52] = 127, @@ -34652,6 +34920,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][52] = 127, [0][1][1][0][RTW89_CHILE][52] = 127, [0][1][1][0][RTW89_QATAR][52] = 127, + [0][1][1][0][RTW89_THAILAND][52] = 127, [0][0][2][0][RTW89_FCC][0] = 70, [0][0][2][0][RTW89_ETSI][0] = 66, [0][0][2][0][RTW89_MKK][0] = 68, @@ -34664,6 +34933,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][0] = 54, [0][0][2][0][RTW89_CHILE][0] = 68, [0][0][2][0][RTW89_QATAR][0] = 66, + [0][0][2][0][RTW89_THAILAND][0] = 66, [0][0][2][0][RTW89_FCC][2] = 72, [0][0][2][0][RTW89_ETSI][2] = 66, [0][0][2][0][RTW89_MKK][2] = 68, @@ -34676,6 +34946,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][2] = 54, [0][0][2][0][RTW89_CHILE][2] = 70, [0][0][2][0][RTW89_QATAR][2] = 66, + [0][0][2][0][RTW89_THAILAND][2] = 66, [0][0][2][0][RTW89_FCC][4] = 72, [0][0][2][0][RTW89_ETSI][4] = 66, [0][0][2][0][RTW89_MKK][4] = 68, @@ -34688,6 +34959,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][4] = 54, [0][0][2][0][RTW89_CHILE][4] = 70, [0][0][2][0][RTW89_QATAR][4] = 66, + [0][0][2][0][RTW89_THAILAND][4] = 66, [0][0][2][0][RTW89_FCC][6] = 72, [0][0][2][0][RTW89_ETSI][6] = 66, [0][0][2][0][RTW89_MKK][6] = 60, @@ -34700,6 +34972,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][6] = 54, [0][0][2][0][RTW89_CHILE][6] = 70, [0][0][2][0][RTW89_QATAR][6] = 66, + [0][0][2][0][RTW89_THAILAND][6] = 66, [0][0][2][0][RTW89_FCC][8] = 72, [0][0][2][0][RTW89_ETSI][8] = 66, [0][0][2][0][RTW89_MKK][8] = 58, @@ -34712,6 +34985,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][8] = 54, [0][0][2][0][RTW89_CHILE][8] = 70, [0][0][2][0][RTW89_QATAR][8] = 66, + [0][0][2][0][RTW89_THAILAND][8] = 66, [0][0][2][0][RTW89_FCC][10] = 72, [0][0][2][0][RTW89_ETSI][10] = 66, [0][0][2][0][RTW89_MKK][10] = 70, @@ -34724,6 +34998,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][10] = 54, [0][0][2][0][RTW89_CHILE][10] = 70, [0][0][2][0][RTW89_QATAR][10] = 66, + [0][0][2][0][RTW89_THAILAND][10] = 66, [0][0][2][0][RTW89_FCC][12] = 72, [0][0][2][0][RTW89_ETSI][12] = 66, [0][0][2][0][RTW89_MKK][12] = 70, @@ -34736,6 +35011,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][12] = 54, [0][0][2][0][RTW89_CHILE][12] = 70, [0][0][2][0][RTW89_QATAR][12] = 66, + [0][0][2][0][RTW89_THAILAND][12] = 66, [0][0][2][0][RTW89_FCC][14] = 68, [0][0][2][0][RTW89_ETSI][14] = 66, [0][0][2][0][RTW89_MKK][14] = 70, @@ -34748,6 +35024,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][14] = 54, [0][0][2][0][RTW89_CHILE][14] = 66, [0][0][2][0][RTW89_QATAR][14] = 66, + [0][0][2][0][RTW89_THAILAND][14] = 66, [0][0][2][0][RTW89_FCC][15] = 70, [0][0][2][0][RTW89_ETSI][15] = 66, [0][0][2][0][RTW89_MKK][15] = 70, @@ -34760,6 +35037,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][15] = 54, [0][0][2][0][RTW89_CHILE][15] = 68, 
[0][0][2][0][RTW89_QATAR][15] = 66, + [0][0][2][0][RTW89_THAILAND][15] = 66, [0][0][2][0][RTW89_FCC][17] = 72, [0][0][2][0][RTW89_ETSI][17] = 66, [0][0][2][0][RTW89_MKK][17] = 70, @@ -34772,6 +35050,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][17] = 54, [0][0][2][0][RTW89_CHILE][17] = 68, [0][0][2][0][RTW89_QATAR][17] = 66, + [0][0][2][0][RTW89_THAILAND][17] = 66, [0][0][2][0][RTW89_FCC][19] = 72, [0][0][2][0][RTW89_ETSI][19] = 66, [0][0][2][0][RTW89_MKK][19] = 70, @@ -34784,6 +35063,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][19] = 54, [0][0][2][0][RTW89_CHILE][19] = 68, [0][0][2][0][RTW89_QATAR][19] = 66, + [0][0][2][0][RTW89_THAILAND][19] = 66, [0][0][2][0][RTW89_FCC][21] = 72, [0][0][2][0][RTW89_ETSI][21] = 66, [0][0][2][0][RTW89_MKK][21] = 70, @@ -34796,6 +35076,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][21] = 54, [0][0][2][0][RTW89_CHILE][21] = 70, [0][0][2][0][RTW89_QATAR][21] = 66, + [0][0][2][0][RTW89_THAILAND][21] = 66, [0][0][2][0][RTW89_FCC][23] = 72, [0][0][2][0][RTW89_ETSI][23] = 66, [0][0][2][0][RTW89_MKK][23] = 70, @@ -34808,6 +35089,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][23] = 54, [0][0][2][0][RTW89_CHILE][23] = 70, [0][0][2][0][RTW89_QATAR][23] = 66, + [0][0][2][0][RTW89_THAILAND][23] = 66, [0][0][2][0][RTW89_FCC][25] = 72, [0][0][2][0][RTW89_ETSI][25] = 66, [0][0][2][0][RTW89_MKK][25] = 70, @@ -34820,6 +35102,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][25] = 54, [0][0][2][0][RTW89_CHILE][25] = 70, [0][0][2][0][RTW89_QATAR][25] = 66, + [0][0][2][0][RTW89_THAILAND][25] = 66, [0][0][2][0][RTW89_FCC][27] = 72, [0][0][2][0][RTW89_ETSI][27] = 66, [0][0][2][0][RTW89_MKK][27] = 70, @@ -34832,6 +35115,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][27] = 54, [0][0][2][0][RTW89_CHILE][27] = 56, [0][0][2][0][RTW89_QATAR][27] = 66, + [0][0][2][0][RTW89_THAILAND][27] = 66, [0][0][2][0][RTW89_FCC][29] = 72, [0][0][2][0][RTW89_ETSI][29] = 66, [0][0][2][0][RTW89_MKK][29] = 70, @@ -34844,6 +35128,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][29] = 54, [0][0][2][0][RTW89_CHILE][29] = 56, [0][0][2][0][RTW89_QATAR][29] = 66, + [0][0][2][0][RTW89_THAILAND][29] = 66, [0][0][2][0][RTW89_FCC][31] = 72, [0][0][2][0][RTW89_ETSI][31] = 66, [0][0][2][0][RTW89_MKK][31] = 70, @@ -34856,6 +35141,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][31] = 54, [0][0][2][0][RTW89_CHILE][31] = 56, [0][0][2][0][RTW89_QATAR][31] = 66, + [0][0][2][0][RTW89_THAILAND][31] = 66, [0][0][2][0][RTW89_FCC][33] = 72, [0][0][2][0][RTW89_ETSI][33] = 66, [0][0][2][0][RTW89_MKK][33] = 70, @@ -34868,6 +35154,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][33] = 54, [0][0][2][0][RTW89_CHILE][33] = 56, [0][0][2][0][RTW89_QATAR][33] = 66, + [0][0][2][0][RTW89_THAILAND][33] = 66, [0][0][2][0][RTW89_FCC][35] = 56, [0][0][2][0][RTW89_ETSI][35] = 66, [0][0][2][0][RTW89_MKK][35] = 70, @@ -34880,6 +35167,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][35] = 54, [0][0][2][0][RTW89_CHILE][35] = 56, [0][0][2][0][RTW89_QATAR][35] = 66, + [0][0][2][0][RTW89_THAILAND][35] = 66, [0][0][2][0][RTW89_FCC][37] = 72, 
[0][0][2][0][RTW89_ETSI][37] = 127, [0][0][2][0][RTW89_MKK][37] = 70, @@ -34892,66 +35180,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][37] = 127, [0][0][2][0][RTW89_CHILE][37] = 70, [0][0][2][0][RTW89_QATAR][37] = 127, + [0][0][2][0][RTW89_THAILAND][37] = 127, [0][0][2][0][RTW89_FCC][38] = 72, [0][0][2][0][RTW89_ETSI][38] = 30, [0][0][2][0][RTW89_MKK][38] = 127, [0][0][2][0][RTW89_IC][38] = 72, [0][0][2][0][RTW89_KCC][38] = 58, [0][0][2][0][RTW89_ACMA][38] = 70, - [0][0][2][0][RTW89_CN][38] = 68, + [0][0][2][0][RTW89_CN][38] = 56, [0][0][2][0][RTW89_UK][38] = 64, [0][0][2][0][RTW89_MEXICO][38] = 72, [0][0][2][0][RTW89_UKRAINE][38] = 30, [0][0][2][0][RTW89_CHILE][38] = 70, [0][0][2][0][RTW89_QATAR][38] = 30, + [0][0][2][0][RTW89_THAILAND][38] = 30, [0][0][2][0][RTW89_FCC][40] = 72, [0][0][2][0][RTW89_ETSI][40] = 30, [0][0][2][0][RTW89_MKK][40] = 127, [0][0][2][0][RTW89_IC][40] = 72, [0][0][2][0][RTW89_KCC][40] = 58, [0][0][2][0][RTW89_ACMA][40] = 70, - [0][0][2][0][RTW89_CN][40] = 68, + [0][0][2][0][RTW89_CN][40] = 56, [0][0][2][0][RTW89_UK][40] = 64, [0][0][2][0][RTW89_MEXICO][40] = 72, [0][0][2][0][RTW89_UKRAINE][40] = 30, [0][0][2][0][RTW89_CHILE][40] = 70, [0][0][2][0][RTW89_QATAR][40] = 30, + [0][0][2][0][RTW89_THAILAND][40] = 30, [0][0][2][0][RTW89_FCC][42] = 72, [0][0][2][0][RTW89_ETSI][42] = 30, [0][0][2][0][RTW89_MKK][42] = 127, [0][0][2][0][RTW89_IC][42] = 72, [0][0][2][0][RTW89_KCC][42] = 58, [0][0][2][0][RTW89_ACMA][42] = 70, - [0][0][2][0][RTW89_CN][42] = 68, + [0][0][2][0][RTW89_CN][42] = 56, [0][0][2][0][RTW89_UK][42] = 64, [0][0][2][0][RTW89_MEXICO][42] = 72, [0][0][2][0][RTW89_UKRAINE][42] = 30, [0][0][2][0][RTW89_CHILE][42] = 70, [0][0][2][0][RTW89_QATAR][42] = 30, + [0][0][2][0][RTW89_THAILAND][42] = 30, [0][0][2][0][RTW89_FCC][44] = 72, [0][0][2][0][RTW89_ETSI][44] = 30, [0][0][2][0][RTW89_MKK][44] = 127, [0][0][2][0][RTW89_IC][44] = 72, [0][0][2][0][RTW89_KCC][44] = 58, [0][0][2][0][RTW89_ACMA][44] = 70, - [0][0][2][0][RTW89_CN][44] = 68, + [0][0][2][0][RTW89_CN][44] = 56, [0][0][2][0][RTW89_UK][44] = 64, [0][0][2][0][RTW89_MEXICO][44] = 72, [0][0][2][0][RTW89_UKRAINE][44] = 30, [0][0][2][0][RTW89_CHILE][44] = 70, [0][0][2][0][RTW89_QATAR][44] = 30, + [0][0][2][0][RTW89_THAILAND][44] = 30, [0][0][2][0][RTW89_FCC][46] = 72, [0][0][2][0][RTW89_ETSI][46] = 30, [0][0][2][0][RTW89_MKK][46] = 127, [0][0][2][0][RTW89_IC][46] = 72, [0][0][2][0][RTW89_KCC][46] = 58, [0][0][2][0][RTW89_ACMA][46] = 70, - [0][0][2][0][RTW89_CN][46] = 68, + [0][0][2][0][RTW89_CN][46] = 56, [0][0][2][0][RTW89_UK][46] = 64, [0][0][2][0][RTW89_MEXICO][46] = 72, [0][0][2][0][RTW89_UKRAINE][46] = 30, [0][0][2][0][RTW89_CHILE][46] = 70, [0][0][2][0][RTW89_QATAR][46] = 30, + [0][0][2][0][RTW89_THAILAND][46] = 30, [0][0][2][0][RTW89_FCC][48] = 72, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, @@ -34964,6 +35258,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][48] = 127, [0][0][2][0][RTW89_CHILE][48] = 127, [0][0][2][0][RTW89_QATAR][48] = 127, + [0][0][2][0][RTW89_THAILAND][48] = 127, [0][0][2][0][RTW89_FCC][50] = 72, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, @@ -34976,6 +35271,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][50] = 127, [0][0][2][0][RTW89_CHILE][50] = 127, [0][0][2][0][RTW89_QATAR][50] = 127, + [0][0][2][0][RTW89_THAILAND][50] = 127, [0][0][2][0][RTW89_FCC][52] = 72, 
[0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, @@ -34988,6 +35284,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][52] = 127, [0][0][2][0][RTW89_CHILE][52] = 127, [0][0][2][0][RTW89_QATAR][52] = 127, + [0][0][2][0][RTW89_THAILAND][52] = 127, [0][1][2][0][RTW89_FCC][0] = 60, [0][1][2][0][RTW89_ETSI][0] = 54, [0][1][2][0][RTW89_MKK][0] = 54, @@ -35000,6 +35297,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][0] = 42, [0][1][2][0][RTW89_CHILE][0] = 60, [0][1][2][0][RTW89_QATAR][0] = 54, + [0][1][2][0][RTW89_THAILAND][0] = 54, [0][1][2][0][RTW89_FCC][2] = 62, [0][1][2][0][RTW89_ETSI][2] = 54, [0][1][2][0][RTW89_MKK][2] = 54, @@ -35012,6 +35310,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][2] = 42, [0][1][2][0][RTW89_CHILE][2] = 62, [0][1][2][0][RTW89_QATAR][2] = 54, + [0][1][2][0][RTW89_THAILAND][2] = 54, [0][1][2][0][RTW89_FCC][4] = 62, [0][1][2][0][RTW89_ETSI][4] = 54, [0][1][2][0][RTW89_MKK][4] = 54, @@ -35024,6 +35323,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][4] = 42, [0][1][2][0][RTW89_CHILE][4] = 62, [0][1][2][0][RTW89_QATAR][4] = 54, + [0][1][2][0][RTW89_THAILAND][4] = 54, [0][1][2][0][RTW89_FCC][6] = 62, [0][1][2][0][RTW89_ETSI][6] = 54, [0][1][2][0][RTW89_MKK][6] = 50, @@ -35036,6 +35336,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][6] = 42, [0][1][2][0][RTW89_CHILE][6] = 62, [0][1][2][0][RTW89_QATAR][6] = 54, + [0][1][2][0][RTW89_THAILAND][6] = 54, [0][1][2][0][RTW89_FCC][8] = 62, [0][1][2][0][RTW89_ETSI][8] = 54, [0][1][2][0][RTW89_MKK][8] = 42, @@ -35048,6 +35349,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][8] = 42, [0][1][2][0][RTW89_CHILE][8] = 62, [0][1][2][0][RTW89_QATAR][8] = 54, + [0][1][2][0][RTW89_THAILAND][8] = 54, [0][1][2][0][RTW89_FCC][10] = 62, [0][1][2][0][RTW89_ETSI][10] = 54, [0][1][2][0][RTW89_MKK][10] = 54, @@ -35060,6 +35362,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][10] = 42, [0][1][2][0][RTW89_CHILE][10] = 62, [0][1][2][0][RTW89_QATAR][10] = 54, + [0][1][2][0][RTW89_THAILAND][10] = 54, [0][1][2][0][RTW89_FCC][12] = 62, [0][1][2][0][RTW89_ETSI][12] = 54, [0][1][2][0][RTW89_MKK][12] = 54, @@ -35072,6 +35375,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][12] = 42, [0][1][2][0][RTW89_CHILE][12] = 62, [0][1][2][0][RTW89_QATAR][12] = 54, + [0][1][2][0][RTW89_THAILAND][12] = 54, [0][1][2][0][RTW89_FCC][14] = 62, [0][1][2][0][RTW89_ETSI][14] = 54, [0][1][2][0][RTW89_MKK][14] = 54, @@ -35084,6 +35388,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][14] = 42, [0][1][2][0][RTW89_CHILE][14] = 62, [0][1][2][0][RTW89_QATAR][14] = 54, + [0][1][2][0][RTW89_THAILAND][14] = 54, [0][1][2][0][RTW89_FCC][15] = 60, [0][1][2][0][RTW89_ETSI][15] = 54, [0][1][2][0][RTW89_MKK][15] = 68, @@ -35096,6 +35401,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][15] = 42, [0][1][2][0][RTW89_CHILE][15] = 60, [0][1][2][0][RTW89_QATAR][15] = 54, + [0][1][2][0][RTW89_THAILAND][15] = 54, [0][1][2][0][RTW89_FCC][17] = 62, [0][1][2][0][RTW89_ETSI][17] = 54, [0][1][2][0][RTW89_MKK][17] = 68, @@ -35108,6 +35414,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][17] = 42, [0][1][2][0][RTW89_CHILE][17] = 60, [0][1][2][0][RTW89_QATAR][17] = 54, + [0][1][2][0][RTW89_THAILAND][17] = 54, [0][1][2][0][RTW89_FCC][19] = 62, [0][1][2][0][RTW89_ETSI][19] = 54, [0][1][2][0][RTW89_MKK][19] = 68, @@ -35120,6 +35427,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][19] = 42, [0][1][2][0][RTW89_CHILE][19] = 62, [0][1][2][0][RTW89_QATAR][19] = 54, + [0][1][2][0][RTW89_THAILAND][19] = 54, [0][1][2][0][RTW89_FCC][21] = 62, [0][1][2][0][RTW89_ETSI][21] = 54, [0][1][2][0][RTW89_MKK][21] = 68, @@ -35132,6 +35440,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][21] = 42, [0][1][2][0][RTW89_CHILE][21] = 62, [0][1][2][0][RTW89_QATAR][21] = 54, + [0][1][2][0][RTW89_THAILAND][21] = 54, [0][1][2][0][RTW89_FCC][23] = 62, [0][1][2][0][RTW89_ETSI][23] = 54, [0][1][2][0][RTW89_MKK][23] = 68, @@ -35144,6 +35453,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][23] = 42, [0][1][2][0][RTW89_CHILE][23] = 62, [0][1][2][0][RTW89_QATAR][23] = 54, + [0][1][2][0][RTW89_THAILAND][23] = 54, [0][1][2][0][RTW89_FCC][25] = 62, [0][1][2][0][RTW89_ETSI][25] = 54, [0][1][2][0][RTW89_MKK][25] = 68, @@ -35156,6 +35466,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][25] = 42, [0][1][2][0][RTW89_CHILE][25] = 62, [0][1][2][0][RTW89_QATAR][25] = 54, + [0][1][2][0][RTW89_THAILAND][25] = 54, [0][1][2][0][RTW89_FCC][27] = 62, [0][1][2][0][RTW89_ETSI][27] = 54, [0][1][2][0][RTW89_MKK][27] = 68, @@ -35168,6 +35479,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][27] = 42, [0][1][2][0][RTW89_CHILE][27] = 46, [0][1][2][0][RTW89_QATAR][27] = 54, + [0][1][2][0][RTW89_THAILAND][27] = 54, [0][1][2][0][RTW89_FCC][29] = 62, [0][1][2][0][RTW89_ETSI][29] = 54, [0][1][2][0][RTW89_MKK][29] = 68, @@ -35180,6 +35492,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][29] = 42, [0][1][2][0][RTW89_CHILE][29] = 46, [0][1][2][0][RTW89_QATAR][29] = 54, + [0][1][2][0][RTW89_THAILAND][29] = 54, [0][1][2][0][RTW89_FCC][31] = 62, [0][1][2][0][RTW89_ETSI][31] = 54, [0][1][2][0][RTW89_MKK][31] = 68, @@ -35192,6 +35505,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][31] = 42, [0][1][2][0][RTW89_CHILE][31] = 46, [0][1][2][0][RTW89_QATAR][31] = 54, + [0][1][2][0][RTW89_THAILAND][31] = 54, [0][1][2][0][RTW89_FCC][33] = 62, [0][1][2][0][RTW89_ETSI][33] = 54, [0][1][2][0][RTW89_MKK][33] = 68, @@ -35204,6 +35518,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][33] = 42, [0][1][2][0][RTW89_CHILE][33] = 46, [0][1][2][0][RTW89_QATAR][33] = 54, + [0][1][2][0][RTW89_THAILAND][33] = 54, [0][1][2][0][RTW89_FCC][35] = 46, [0][1][2][0][RTW89_ETSI][35] = 54, [0][1][2][0][RTW89_MKK][35] = 68, @@ -35216,6 +35531,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][35] = 42, [0][1][2][0][RTW89_CHILE][35] = 46, [0][1][2][0][RTW89_QATAR][35] = 54, + [0][1][2][0][RTW89_THAILAND][35] = 54, [0][1][2][0][RTW89_FCC][37] = 64, [0][1][2][0][RTW89_ETSI][37] = 127, [0][1][2][0][RTW89_MKK][37] = 68, @@ -35228,66 +35544,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][37] = 127, 
[0][1][2][0][RTW89_CHILE][37] = 64, [0][1][2][0][RTW89_QATAR][37] = 127, + [0][1][2][0][RTW89_THAILAND][37] = 127, [0][1][2][0][RTW89_FCC][38] = 72, [0][1][2][0][RTW89_ETSI][38] = 18, [0][1][2][0][RTW89_MKK][38] = 127, [0][1][2][0][RTW89_IC][38] = 72, [0][1][2][0][RTW89_KCC][38] = 56, [0][1][2][0][RTW89_ACMA][38] = 70, - [0][1][2][0][RTW89_CN][38] = 68, + [0][1][2][0][RTW89_CN][38] = 56, [0][1][2][0][RTW89_UK][38] = 52, [0][1][2][0][RTW89_MEXICO][38] = 72, [0][1][2][0][RTW89_UKRAINE][38] = 18, [0][1][2][0][RTW89_CHILE][38] = 70, [0][1][2][0][RTW89_QATAR][38] = 18, + [0][1][2][0][RTW89_THAILAND][38] = 18, [0][1][2][0][RTW89_FCC][40] = 72, [0][1][2][0][RTW89_ETSI][40] = 18, [0][1][2][0][RTW89_MKK][40] = 127, [0][1][2][0][RTW89_IC][40] = 72, [0][1][2][0][RTW89_KCC][40] = 56, [0][1][2][0][RTW89_ACMA][40] = 70, - [0][1][2][0][RTW89_CN][40] = 68, + [0][1][2][0][RTW89_CN][40] = 56, [0][1][2][0][RTW89_UK][40] = 52, [0][1][2][0][RTW89_MEXICO][40] = 72, [0][1][2][0][RTW89_UKRAINE][40] = 18, [0][1][2][0][RTW89_CHILE][40] = 70, [0][1][2][0][RTW89_QATAR][40] = 18, + [0][1][2][0][RTW89_THAILAND][40] = 18, [0][1][2][0][RTW89_FCC][42] = 72, [0][1][2][0][RTW89_ETSI][42] = 18, [0][1][2][0][RTW89_MKK][42] = 127, [0][1][2][0][RTW89_IC][42] = 72, [0][1][2][0][RTW89_KCC][42] = 56, [0][1][2][0][RTW89_ACMA][42] = 70, - [0][1][2][0][RTW89_CN][42] = 68, + [0][1][2][0][RTW89_CN][42] = 56, [0][1][2][0][RTW89_UK][42] = 52, [0][1][2][0][RTW89_MEXICO][42] = 72, [0][1][2][0][RTW89_UKRAINE][42] = 18, [0][1][2][0][RTW89_CHILE][42] = 70, [0][1][2][0][RTW89_QATAR][42] = 18, + [0][1][2][0][RTW89_THAILAND][42] = 18, [0][1][2][0][RTW89_FCC][44] = 72, [0][1][2][0][RTW89_ETSI][44] = 18, [0][1][2][0][RTW89_MKK][44] = 127, [0][1][2][0][RTW89_IC][44] = 72, [0][1][2][0][RTW89_KCC][44] = 56, [0][1][2][0][RTW89_ACMA][44] = 70, - [0][1][2][0][RTW89_CN][44] = 68, + [0][1][2][0][RTW89_CN][44] = 56, [0][1][2][0][RTW89_UK][44] = 52, [0][1][2][0][RTW89_MEXICO][44] = 72, [0][1][2][0][RTW89_UKRAINE][44] = 18, [0][1][2][0][RTW89_CHILE][44] = 70, [0][1][2][0][RTW89_QATAR][44] = 18, + [0][1][2][0][RTW89_THAILAND][44] = 18, [0][1][2][0][RTW89_FCC][46] = 72, [0][1][2][0][RTW89_ETSI][46] = 18, [0][1][2][0][RTW89_MKK][46] = 127, [0][1][2][0][RTW89_IC][46] = 72, [0][1][2][0][RTW89_KCC][46] = 56, [0][1][2][0][RTW89_ACMA][46] = 70, - [0][1][2][0][RTW89_CN][46] = 68, + [0][1][2][0][RTW89_CN][46] = 56, [0][1][2][0][RTW89_UK][46] = 52, [0][1][2][0][RTW89_MEXICO][46] = 72, [0][1][2][0][RTW89_UKRAINE][46] = 18, [0][1][2][0][RTW89_CHILE][46] = 70, [0][1][2][0][RTW89_QATAR][46] = 18, + [0][1][2][0][RTW89_THAILAND][46] = 18, [0][1][2][0][RTW89_FCC][48] = 48, [0][1][2][0][RTW89_ETSI][48] = 127, [0][1][2][0][RTW89_MKK][48] = 127, @@ -35300,6 +35622,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][48] = 127, [0][1][2][0][RTW89_CHILE][48] = 127, [0][1][2][0][RTW89_QATAR][48] = 127, + [0][1][2][0][RTW89_THAILAND][48] = 127, [0][1][2][0][RTW89_FCC][50] = 50, [0][1][2][0][RTW89_ETSI][50] = 127, [0][1][2][0][RTW89_MKK][50] = 127, @@ -35312,6 +35635,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][50] = 127, [0][1][2][0][RTW89_CHILE][50] = 127, [0][1][2][0][RTW89_QATAR][50] = 127, + [0][1][2][0][RTW89_THAILAND][50] = 127, [0][1][2][0][RTW89_FCC][52] = 48, [0][1][2][0][RTW89_ETSI][52] = 127, [0][1][2][0][RTW89_MKK][52] = 127, @@ -35324,6 +35648,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][52] = 127, 
[0][1][2][0][RTW89_CHILE][52] = 127, [0][1][2][0][RTW89_QATAR][52] = 127, + [0][1][2][0][RTW89_THAILAND][52] = 127, [0][1][2][1][RTW89_FCC][0] = 60, [0][1][2][1][RTW89_ETSI][0] = 40, [0][1][2][1][RTW89_MKK][0] = 54, @@ -35336,6 +35661,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][0] = 30, [0][1][2][1][RTW89_CHILE][0] = 60, [0][1][2][1][RTW89_QATAR][0] = 40, + [0][1][2][1][RTW89_THAILAND][0] = 40, [0][1][2][1][RTW89_FCC][2] = 62, [0][1][2][1][RTW89_ETSI][2] = 40, [0][1][2][1][RTW89_MKK][2] = 54, @@ -35348,6 +35674,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][2] = 30, [0][1][2][1][RTW89_CHILE][2] = 60, [0][1][2][1][RTW89_QATAR][2] = 40, + [0][1][2][1][RTW89_THAILAND][2] = 40, [0][1][2][1][RTW89_FCC][4] = 62, [0][1][2][1][RTW89_ETSI][4] = 40, [0][1][2][1][RTW89_MKK][4] = 54, @@ -35360,6 +35687,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][4] = 30, [0][1][2][1][RTW89_CHILE][4] = 60, [0][1][2][1][RTW89_QATAR][4] = 40, + [0][1][2][1][RTW89_THAILAND][4] = 40, [0][1][2][1][RTW89_FCC][6] = 62, [0][1][2][1][RTW89_ETSI][6] = 40, [0][1][2][1][RTW89_MKK][6] = 50, @@ -35372,6 +35700,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][6] = 30, [0][1][2][1][RTW89_CHILE][6] = 60, [0][1][2][1][RTW89_QATAR][6] = 40, + [0][1][2][1][RTW89_THAILAND][6] = 40, [0][1][2][1][RTW89_FCC][8] = 62, [0][1][2][1][RTW89_ETSI][8] = 40, [0][1][2][1][RTW89_MKK][8] = 42, @@ -35384,6 +35713,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][8] = 30, [0][1][2][1][RTW89_CHILE][8] = 60, [0][1][2][1][RTW89_QATAR][8] = 40, + [0][1][2][1][RTW89_THAILAND][8] = 40, [0][1][2][1][RTW89_FCC][10] = 62, [0][1][2][1][RTW89_ETSI][10] = 40, [0][1][2][1][RTW89_MKK][10] = 54, @@ -35396,6 +35726,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][10] = 30, [0][1][2][1][RTW89_CHILE][10] = 60, [0][1][2][1][RTW89_QATAR][10] = 40, + [0][1][2][1][RTW89_THAILAND][10] = 40, [0][1][2][1][RTW89_FCC][12] = 62, [0][1][2][1][RTW89_ETSI][12] = 40, [0][1][2][1][RTW89_MKK][12] = 54, @@ -35408,6 +35739,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][12] = 30, [0][1][2][1][RTW89_CHILE][12] = 60, [0][1][2][1][RTW89_QATAR][12] = 40, + [0][1][2][1][RTW89_THAILAND][12] = 40, [0][1][2][1][RTW89_FCC][14] = 62, [0][1][2][1][RTW89_ETSI][14] = 40, [0][1][2][1][RTW89_MKK][14] = 54, @@ -35420,6 +35752,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][14] = 30, [0][1][2][1][RTW89_CHILE][14] = 60, [0][1][2][1][RTW89_QATAR][14] = 40, + [0][1][2][1][RTW89_THAILAND][14] = 40, [0][1][2][1][RTW89_FCC][15] = 60, [0][1][2][1][RTW89_ETSI][15] = 40, [0][1][2][1][RTW89_MKK][15] = 68, @@ -35432,6 +35765,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][15] = 30, [0][1][2][1][RTW89_CHILE][15] = 60, [0][1][2][1][RTW89_QATAR][15] = 40, + [0][1][2][1][RTW89_THAILAND][15] = 40, [0][1][2][1][RTW89_FCC][17] = 62, [0][1][2][1][RTW89_ETSI][17] = 40, [0][1][2][1][RTW89_MKK][17] = 68, @@ -35444,6 +35778,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][17] = 30, [0][1][2][1][RTW89_CHILE][17] = 60, [0][1][2][1][RTW89_QATAR][17] = 40, + [0][1][2][1][RTW89_THAILAND][17] = 40, [0][1][2][1][RTW89_FCC][19] = 62, 
[0][1][2][1][RTW89_ETSI][19] = 40, [0][1][2][1][RTW89_MKK][19] = 68, @@ -35456,6 +35791,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][19] = 30, [0][1][2][1][RTW89_CHILE][19] = 60, [0][1][2][1][RTW89_QATAR][19] = 40, + [0][1][2][1][RTW89_THAILAND][19] = 40, [0][1][2][1][RTW89_FCC][21] = 62, [0][1][2][1][RTW89_ETSI][21] = 40, [0][1][2][1][RTW89_MKK][21] = 68, @@ -35468,6 +35804,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][21] = 30, [0][1][2][1][RTW89_CHILE][21] = 60, [0][1][2][1][RTW89_QATAR][21] = 40, + [0][1][2][1][RTW89_THAILAND][21] = 40, [0][1][2][1][RTW89_FCC][23] = 62, [0][1][2][1][RTW89_ETSI][23] = 40, [0][1][2][1][RTW89_MKK][23] = 68, @@ -35480,6 +35817,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][23] = 30, [0][1][2][1][RTW89_CHILE][23] = 60, [0][1][2][1][RTW89_QATAR][23] = 40, + [0][1][2][1][RTW89_THAILAND][23] = 40, [0][1][2][1][RTW89_FCC][25] = 46, [0][1][2][1][RTW89_ETSI][25] = 40, [0][1][2][1][RTW89_MKK][25] = 68, @@ -35492,6 +35830,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][25] = 30, [0][1][2][1][RTW89_CHILE][25] = 60, [0][1][2][1][RTW89_QATAR][25] = 40, + [0][1][2][1][RTW89_THAILAND][25] = 40, [0][1][2][1][RTW89_FCC][27] = 46, [0][1][2][1][RTW89_ETSI][27] = 40, [0][1][2][1][RTW89_MKK][27] = 68, @@ -35504,6 +35843,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][27] = 30, [0][1][2][1][RTW89_CHILE][27] = 46, [0][1][2][1][RTW89_QATAR][27] = 40, + [0][1][2][1][RTW89_THAILAND][27] = 40, [0][1][2][1][RTW89_FCC][29] = 46, [0][1][2][1][RTW89_ETSI][29] = 40, [0][1][2][1][RTW89_MKK][29] = 68, @@ -35516,6 +35856,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][29] = 30, [0][1][2][1][RTW89_CHILE][29] = 46, [0][1][2][1][RTW89_QATAR][29] = 40, + [0][1][2][1][RTW89_THAILAND][29] = 40, [0][1][2][1][RTW89_FCC][31] = 46, [0][1][2][1][RTW89_ETSI][31] = 40, [0][1][2][1][RTW89_MKK][31] = 68, @@ -35528,6 +35869,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][31] = 30, [0][1][2][1][RTW89_CHILE][31] = 46, [0][1][2][1][RTW89_QATAR][31] = 40, + [0][1][2][1][RTW89_THAILAND][31] = 40, [0][1][2][1][RTW89_FCC][33] = 46, [0][1][2][1][RTW89_ETSI][33] = 40, [0][1][2][1][RTW89_MKK][33] = 68, @@ -35540,6 +35882,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][33] = 30, [0][1][2][1][RTW89_CHILE][33] = 46, [0][1][2][1][RTW89_QATAR][33] = 40, + [0][1][2][1][RTW89_THAILAND][33] = 40, [0][1][2][1][RTW89_FCC][35] = 46, [0][1][2][1][RTW89_ETSI][35] = 40, [0][1][2][1][RTW89_MKK][35] = 68, @@ -35552,6 +35895,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][35] = 30, [0][1][2][1][RTW89_CHILE][35] = 46, [0][1][2][1][RTW89_QATAR][35] = 40, + [0][1][2][1][RTW89_THAILAND][35] = 40, [0][1][2][1][RTW89_FCC][37] = 64, [0][1][2][1][RTW89_ETSI][37] = 127, [0][1][2][1][RTW89_MKK][37] = 68, @@ -35564,66 +35908,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][37] = 127, [0][1][2][1][RTW89_CHILE][37] = 64, [0][1][2][1][RTW89_QATAR][37] = 127, + [0][1][2][1][RTW89_THAILAND][37] = 127, [0][1][2][1][RTW89_FCC][38] = 72, [0][1][2][1][RTW89_ETSI][38] = 6, [0][1][2][1][RTW89_MKK][38] = 127, [0][1][2][1][RTW89_IC][38] = 72, 
[0][1][2][1][RTW89_KCC][38] = 56, [0][1][2][1][RTW89_ACMA][38] = 70, - [0][1][2][1][RTW89_CN][38] = 60, + [0][1][2][1][RTW89_CN][38] = 50, [0][1][2][1][RTW89_UK][38] = 40, [0][1][2][1][RTW89_MEXICO][38] = 72, [0][1][2][1][RTW89_UKRAINE][38] = 6, [0][1][2][1][RTW89_CHILE][38] = 60, [0][1][2][1][RTW89_QATAR][38] = 6, + [0][1][2][1][RTW89_THAILAND][38] = 6, [0][1][2][1][RTW89_FCC][40] = 72, [0][1][2][1][RTW89_ETSI][40] = 6, [0][1][2][1][RTW89_MKK][40] = 127, [0][1][2][1][RTW89_IC][40] = 72, [0][1][2][1][RTW89_KCC][40] = 56, [0][1][2][1][RTW89_ACMA][40] = 70, - [0][1][2][1][RTW89_CN][40] = 60, + [0][1][2][1][RTW89_CN][40] = 50, [0][1][2][1][RTW89_UK][40] = 40, [0][1][2][1][RTW89_MEXICO][40] = 72, [0][1][2][1][RTW89_UKRAINE][40] = 6, [0][1][2][1][RTW89_CHILE][40] = 60, [0][1][2][1][RTW89_QATAR][40] = 6, + [0][1][2][1][RTW89_THAILAND][40] = 6, [0][1][2][1][RTW89_FCC][42] = 72, [0][1][2][1][RTW89_ETSI][42] = 6, [0][1][2][1][RTW89_MKK][42] = 127, [0][1][2][1][RTW89_IC][42] = 72, [0][1][2][1][RTW89_KCC][42] = 56, [0][1][2][1][RTW89_ACMA][42] = 70, - [0][1][2][1][RTW89_CN][42] = 60, + [0][1][2][1][RTW89_CN][42] = 50, [0][1][2][1][RTW89_UK][42] = 40, [0][1][2][1][RTW89_MEXICO][42] = 72, [0][1][2][1][RTW89_UKRAINE][42] = 6, [0][1][2][1][RTW89_CHILE][42] = 60, [0][1][2][1][RTW89_QATAR][42] = 6, + [0][1][2][1][RTW89_THAILAND][42] = 6, [0][1][2][1][RTW89_FCC][44] = 72, [0][1][2][1][RTW89_ETSI][44] = 6, [0][1][2][1][RTW89_MKK][44] = 127, [0][1][2][1][RTW89_IC][44] = 72, [0][1][2][1][RTW89_KCC][44] = 56, [0][1][2][1][RTW89_ACMA][44] = 70, - [0][1][2][1][RTW89_CN][44] = 54, + [0][1][2][1][RTW89_CN][44] = 50, [0][1][2][1][RTW89_UK][44] = 40, [0][1][2][1][RTW89_MEXICO][44] = 72, [0][1][2][1][RTW89_UKRAINE][44] = 6, [0][1][2][1][RTW89_CHILE][44] = 60, [0][1][2][1][RTW89_QATAR][44] = 6, + [0][1][2][1][RTW89_THAILAND][44] = 6, [0][1][2][1][RTW89_FCC][46] = 72, [0][1][2][1][RTW89_ETSI][46] = 6, [0][1][2][1][RTW89_MKK][46] = 127, [0][1][2][1][RTW89_IC][46] = 72, [0][1][2][1][RTW89_KCC][46] = 56, [0][1][2][1][RTW89_ACMA][46] = 70, - [0][1][2][1][RTW89_CN][46] = 54, + [0][1][2][1][RTW89_CN][46] = 50, [0][1][2][1][RTW89_UK][46] = 40, [0][1][2][1][RTW89_MEXICO][46] = 72, [0][1][2][1][RTW89_UKRAINE][46] = 6, [0][1][2][1][RTW89_CHILE][46] = 60, [0][1][2][1][RTW89_QATAR][46] = 6, + [0][1][2][1][RTW89_THAILAND][46] = 6, [0][1][2][1][RTW89_FCC][48] = 48, [0][1][2][1][RTW89_ETSI][48] = 127, [0][1][2][1][RTW89_MKK][48] = 127, @@ -35636,6 +35986,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][48] = 127, [0][1][2][1][RTW89_CHILE][48] = 127, [0][1][2][1][RTW89_QATAR][48] = 127, + [0][1][2][1][RTW89_THAILAND][48] = 127, [0][1][2][1][RTW89_FCC][50] = 50, [0][1][2][1][RTW89_ETSI][50] = 127, [0][1][2][1][RTW89_MKK][50] = 127, @@ -35648,6 +35999,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][50] = 127, [0][1][2][1][RTW89_CHILE][50] = 127, [0][1][2][1][RTW89_QATAR][50] = 127, + [0][1][2][1][RTW89_THAILAND][50] = 127, [0][1][2][1][RTW89_FCC][52] = 48, [0][1][2][1][RTW89_ETSI][52] = 127, [0][1][2][1][RTW89_MKK][52] = 127, @@ -35660,6 +36012,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][52] = 127, [0][1][2][1][RTW89_CHILE][52] = 127, [0][1][2][1][RTW89_QATAR][52] = 127, + [0][1][2][1][RTW89_THAILAND][52] = 127, [1][0][2][0][RTW89_FCC][1] = 64, [1][0][2][0][RTW89_ETSI][1] = 66, [1][0][2][0][RTW89_MKK][1] = 66, @@ -35672,6 +36025,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][1] = 54, [1][0][2][0][RTW89_CHILE][1] = 62, [1][0][2][0][RTW89_QATAR][1] = 66, + [1][0][2][0][RTW89_THAILAND][1] = 66, [1][0][2][0][RTW89_FCC][5] = 68, [1][0][2][0][RTW89_ETSI][5] = 66, [1][0][2][0][RTW89_MKK][5] = 66, @@ -35684,6 +36038,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][5] = 54, [1][0][2][0][RTW89_CHILE][5] = 66, [1][0][2][0][RTW89_QATAR][5] = 66, + [1][0][2][0][RTW89_THAILAND][5] = 66, [1][0][2][0][RTW89_FCC][9] = 68, [1][0][2][0][RTW89_ETSI][9] = 66, [1][0][2][0][RTW89_MKK][9] = 66, @@ -35696,6 +36051,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][9] = 54, [1][0][2][0][RTW89_CHILE][9] = 66, [1][0][2][0][RTW89_QATAR][9] = 66, + [1][0][2][0][RTW89_THAILAND][9] = 66, [1][0][2][0][RTW89_FCC][13] = 60, [1][0][2][0][RTW89_ETSI][13] = 66, [1][0][2][0][RTW89_MKK][13] = 66, @@ -35708,6 +36064,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][13] = 54, [1][0][2][0][RTW89_CHILE][13] = 60, [1][0][2][0][RTW89_QATAR][13] = 66, + [1][0][2][0][RTW89_THAILAND][13] = 66, [1][0][2][0][RTW89_FCC][16] = 64, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 66, @@ -35720,6 +36077,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][16] = 54, [1][0][2][0][RTW89_CHILE][16] = 64, [1][0][2][0][RTW89_QATAR][16] = 66, + [1][0][2][0][RTW89_THAILAND][16] = 66, [1][0][2][0][RTW89_FCC][20] = 68, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 66, @@ -35732,6 +36090,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][20] = 54, [1][0][2][0][RTW89_CHILE][20] = 66, [1][0][2][0][RTW89_QATAR][20] = 66, + [1][0][2][0][RTW89_THAILAND][20] = 66, [1][0][2][0][RTW89_FCC][24] = 68, [1][0][2][0][RTW89_ETSI][24] = 66, [1][0][2][0][RTW89_MKK][24] = 66, @@ -35744,6 +36103,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][24] = 54, [1][0][2][0][RTW89_CHILE][24] = 66, [1][0][2][0][RTW89_QATAR][24] = 66, + [1][0][2][0][RTW89_THAILAND][24] = 66, [1][0][2][0][RTW89_FCC][28] = 68, [1][0][2][0][RTW89_ETSI][28] = 66, [1][0][2][0][RTW89_MKK][28] = 66, @@ -35756,6 +36116,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][28] = 54, [1][0][2][0][RTW89_CHILE][28] = 62, [1][0][2][0][RTW89_QATAR][28] = 66, + [1][0][2][0][RTW89_THAILAND][28] = 66, [1][0][2][0][RTW89_FCC][32] = 62, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 66, @@ -35768,6 +36129,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][32] = 54, [1][0][2][0][RTW89_CHILE][32] = 62, [1][0][2][0][RTW89_QATAR][32] = 66, + [1][0][2][0][RTW89_THAILAND][32] = 66, [1][0][2][0][RTW89_FCC][36] = 68, [1][0][2][0][RTW89_ETSI][36] = 127, [1][0][2][0][RTW89_MKK][36] = 66, @@ -35780,30 +36142,33 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][36] = 127, [1][0][2][0][RTW89_CHILE][36] = 66, [1][0][2][0][RTW89_QATAR][36] = 127, + [1][0][2][0][RTW89_THAILAND][36] = 127, [1][0][2][0][RTW89_FCC][39] = 68, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, [1][0][2][0][RTW89_IC][39] = 68, [1][0][2][0][RTW89_KCC][39] = 66, [1][0][2][0][RTW89_ACMA][39] = 66, - [1][0][2][0][RTW89_CN][39] = 62, + 
[1][0][2][0][RTW89_CN][39] = 52, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_MEXICO][39] = 68, [1][0][2][0][RTW89_UKRAINE][39] = 30, [1][0][2][0][RTW89_CHILE][39] = 66, [1][0][2][0][RTW89_QATAR][39] = 30, + [1][0][2][0][RTW89_THAILAND][39] = 30, [1][0][2][0][RTW89_FCC][43] = 68, [1][0][2][0][RTW89_ETSI][43] = 30, [1][0][2][0][RTW89_MKK][43] = 127, [1][0][2][0][RTW89_IC][43] = 68, [1][0][2][0][RTW89_KCC][43] = 66, [1][0][2][0][RTW89_ACMA][43] = 66, - [1][0][2][0][RTW89_CN][43] = 66, + [1][0][2][0][RTW89_CN][43] = 52, [1][0][2][0][RTW89_UK][43] = 64, [1][0][2][0][RTW89_MEXICO][43] = 68, [1][0][2][0][RTW89_UKRAINE][43] = 30, [1][0][2][0][RTW89_CHILE][43] = 66, [1][0][2][0][RTW89_QATAR][43] = 30, + [1][0][2][0][RTW89_THAILAND][43] = 30, [1][0][2][0][RTW89_FCC][47] = 68, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, @@ -35816,6 +36181,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][47] = 127, [1][0][2][0][RTW89_CHILE][47] = 127, [1][0][2][0][RTW89_QATAR][47] = 127, + [1][0][2][0][RTW89_THAILAND][47] = 127, [1][0][2][0][RTW89_FCC][51] = 68, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, @@ -35828,6 +36194,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][51] = 127, [1][0][2][0][RTW89_CHILE][51] = 127, [1][0][2][0][RTW89_QATAR][51] = 127, + [1][0][2][0][RTW89_THAILAND][51] = 127, [1][1][2][0][RTW89_FCC][1] = 54, [1][1][2][0][RTW89_ETSI][1] = 54, [1][1][2][0][RTW89_MKK][1] = 48, @@ -35840,6 +36207,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][1] = 42, [1][1][2][0][RTW89_CHILE][1] = 54, [1][1][2][0][RTW89_QATAR][1] = 54, + [1][1][2][0][RTW89_THAILAND][1] = 54, [1][1][2][0][RTW89_FCC][5] = 68, [1][1][2][0][RTW89_ETSI][5] = 54, [1][1][2][0][RTW89_MKK][5] = 52, @@ -35852,6 +36220,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][5] = 42, [1][1][2][0][RTW89_CHILE][5] = 66, [1][1][2][0][RTW89_QATAR][5] = 54, + [1][1][2][0][RTW89_THAILAND][5] = 54, [1][1][2][0][RTW89_FCC][9] = 68, [1][1][2][0][RTW89_ETSI][9] = 54, [1][1][2][0][RTW89_MKK][9] = 52, @@ -35864,6 +36233,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][9] = 42, [1][1][2][0][RTW89_CHILE][9] = 66, [1][1][2][0][RTW89_QATAR][9] = 54, + [1][1][2][0][RTW89_THAILAND][9] = 54, [1][1][2][0][RTW89_FCC][13] = 54, [1][1][2][0][RTW89_ETSI][13] = 54, [1][1][2][0][RTW89_MKK][13] = 52, @@ -35876,6 +36246,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][13] = 42, [1][1][2][0][RTW89_CHILE][13] = 54, [1][1][2][0][RTW89_QATAR][13] = 54, + [1][1][2][0][RTW89_THAILAND][13] = 54, [1][1][2][0][RTW89_FCC][16] = 56, [1][1][2][0][RTW89_ETSI][16] = 54, [1][1][2][0][RTW89_MKK][16] = 66, @@ -35888,6 +36259,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][16] = 42, [1][1][2][0][RTW89_CHILE][16] = 54, [1][1][2][0][RTW89_QATAR][16] = 54, + [1][1][2][0][RTW89_THAILAND][16] = 54, [1][1][2][0][RTW89_FCC][20] = 68, [1][1][2][0][RTW89_ETSI][20] = 54, [1][1][2][0][RTW89_MKK][20] = 66, @@ -35900,6 +36272,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][20] = 42, [1][1][2][0][RTW89_CHILE][20] = 66, [1][1][2][0][RTW89_QATAR][20] = 54, + [1][1][2][0][RTW89_THAILAND][20] = 54, [1][1][2][0][RTW89_FCC][24] = 68, 
[1][1][2][0][RTW89_ETSI][24] = 54, [1][1][2][0][RTW89_MKK][24] = 66, @@ -35912,6 +36285,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][24] = 42, [1][1][2][0][RTW89_CHILE][24] = 66, [1][1][2][0][RTW89_QATAR][24] = 54, + [1][1][2][0][RTW89_THAILAND][24] = 54, [1][1][2][0][RTW89_FCC][28] = 68, [1][1][2][0][RTW89_ETSI][28] = 54, [1][1][2][0][RTW89_MKK][28] = 66, @@ -35924,6 +36298,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][28] = 42, [1][1][2][0][RTW89_CHILE][28] = 54, [1][1][2][0][RTW89_QATAR][28] = 54, + [1][1][2][0][RTW89_THAILAND][28] = 54, [1][1][2][0][RTW89_FCC][32] = 56, [1][1][2][0][RTW89_ETSI][32] = 54, [1][1][2][0][RTW89_MKK][32] = 66, @@ -35936,6 +36311,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][32] = 42, [1][1][2][0][RTW89_CHILE][32] = 54, [1][1][2][0][RTW89_QATAR][32] = 54, + [1][1][2][0][RTW89_THAILAND][32] = 54, [1][1][2][0][RTW89_FCC][36] = 68, [1][1][2][0][RTW89_ETSI][36] = 127, [1][1][2][0][RTW89_MKK][36] = 66, @@ -35948,30 +36324,33 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][36] = 127, [1][1][2][0][RTW89_CHILE][36] = 66, [1][1][2][0][RTW89_QATAR][36] = 127, + [1][1][2][0][RTW89_THAILAND][36] = 127, [1][1][2][0][RTW89_FCC][39] = 68, [1][1][2][0][RTW89_ETSI][39] = 18, [1][1][2][0][RTW89_MKK][39] = 127, [1][1][2][0][RTW89_IC][39] = 68, [1][1][2][0][RTW89_KCC][39] = 56, [1][1][2][0][RTW89_ACMA][39] = 66, - [1][1][2][0][RTW89_CN][39] = 62, + [1][1][2][0][RTW89_CN][39] = 52, [1][1][2][0][RTW89_UK][39] = 52, [1][1][2][0][RTW89_MEXICO][39] = 68, [1][1][2][0][RTW89_UKRAINE][39] = 18, [1][1][2][0][RTW89_CHILE][39] = 66, [1][1][2][0][RTW89_QATAR][39] = 18, + [1][1][2][0][RTW89_THAILAND][39] = 18, [1][1][2][0][RTW89_FCC][43] = 68, [1][1][2][0][RTW89_ETSI][43] = 18, [1][1][2][0][RTW89_MKK][43] = 127, [1][1][2][0][RTW89_IC][43] = 68, [1][1][2][0][RTW89_KCC][43] = 56, [1][1][2][0][RTW89_ACMA][43] = 66, - [1][1][2][0][RTW89_CN][43] = 66, + [1][1][2][0][RTW89_CN][43] = 52, [1][1][2][0][RTW89_UK][43] = 52, [1][1][2][0][RTW89_MEXICO][43] = 68, [1][1][2][0][RTW89_UKRAINE][43] = 18, [1][1][2][0][RTW89_CHILE][43] = 66, [1][1][2][0][RTW89_QATAR][43] = 18, + [1][1][2][0][RTW89_THAILAND][43] = 18, [1][1][2][0][RTW89_FCC][47] = 62, [1][1][2][0][RTW89_ETSI][47] = 127, [1][1][2][0][RTW89_MKK][47] = 127, @@ -35984,6 +36363,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][47] = 127, [1][1][2][0][RTW89_CHILE][47] = 127, [1][1][2][0][RTW89_QATAR][47] = 127, + [1][1][2][0][RTW89_THAILAND][47] = 127, [1][1][2][0][RTW89_FCC][51] = 60, [1][1][2][0][RTW89_ETSI][51] = 127, [1][1][2][0][RTW89_MKK][51] = 127, @@ -35996,6 +36376,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][51] = 127, [1][1][2][0][RTW89_CHILE][51] = 127, [1][1][2][0][RTW89_QATAR][51] = 127, + [1][1][2][0][RTW89_THAILAND][51] = 127, [1][1][2][1][RTW89_FCC][1] = 54, [1][1][2][1][RTW89_ETSI][1] = 40, [1][1][2][1][RTW89_MKK][1] = 48, @@ -36008,6 +36389,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][1] = 30, [1][1][2][1][RTW89_CHILE][1] = 54, [1][1][2][1][RTW89_QATAR][1] = 40, + [1][1][2][1][RTW89_THAILAND][1] = 40, [1][1][2][1][RTW89_FCC][5] = 68, [1][1][2][1][RTW89_ETSI][5] = 40, [1][1][2][1][RTW89_MKK][5] = 52, @@ -36020,6 +36402,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][5] = 30, [1][1][2][1][RTW89_CHILE][5] = 60, [1][1][2][1][RTW89_QATAR][5] = 40, + [1][1][2][1][RTW89_THAILAND][5] = 40, [1][1][2][1][RTW89_FCC][9] = 68, [1][1][2][1][RTW89_ETSI][9] = 40, [1][1][2][1][RTW89_MKK][9] = 52, @@ -36032,6 +36415,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][9] = 30, [1][1][2][1][RTW89_CHILE][9] = 60, [1][1][2][1][RTW89_QATAR][9] = 40, + [1][1][2][1][RTW89_THAILAND][9] = 40, [1][1][2][1][RTW89_FCC][13] = 54, [1][1][2][1][RTW89_ETSI][13] = 40, [1][1][2][1][RTW89_MKK][13] = 52, @@ -36044,6 +36428,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][13] = 30, [1][1][2][1][RTW89_CHILE][13] = 54, [1][1][2][1][RTW89_QATAR][13] = 40, + [1][1][2][1][RTW89_THAILAND][13] = 40, [1][1][2][1][RTW89_FCC][16] = 56, [1][1][2][1][RTW89_ETSI][16] = 40, [1][1][2][1][RTW89_MKK][16] = 66, @@ -36056,6 +36441,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][16] = 30, [1][1][2][1][RTW89_CHILE][16] = 54, [1][1][2][1][RTW89_QATAR][16] = 40, + [1][1][2][1][RTW89_THAILAND][16] = 40, [1][1][2][1][RTW89_FCC][20] = 68, [1][1][2][1][RTW89_ETSI][20] = 40, [1][1][2][1][RTW89_MKK][20] = 66, @@ -36068,6 +36454,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][20] = 30, [1][1][2][1][RTW89_CHILE][20] = 60, [1][1][2][1][RTW89_QATAR][20] = 40, + [1][1][2][1][RTW89_THAILAND][20] = 40, [1][1][2][1][RTW89_FCC][24] = 68, [1][1][2][1][RTW89_ETSI][24] = 40, [1][1][2][1][RTW89_MKK][24] = 66, @@ -36080,6 +36467,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][24] = 30, [1][1][2][1][RTW89_CHILE][24] = 60, [1][1][2][1][RTW89_QATAR][24] = 40, + [1][1][2][1][RTW89_THAILAND][24] = 40, [1][1][2][1][RTW89_FCC][28] = 68, [1][1][2][1][RTW89_ETSI][28] = 40, [1][1][2][1][RTW89_MKK][28] = 66, @@ -36092,6 +36480,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][28] = 30, [1][1][2][1][RTW89_CHILE][28] = 54, [1][1][2][1][RTW89_QATAR][28] = 40, + [1][1][2][1][RTW89_THAILAND][28] = 40, [1][1][2][1][RTW89_FCC][32] = 56, [1][1][2][1][RTW89_ETSI][32] = 40, [1][1][2][1][RTW89_MKK][32] = 66, @@ -36104,6 +36493,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][32] = 30, [1][1][2][1][RTW89_CHILE][32] = 54, [1][1][2][1][RTW89_QATAR][32] = 40, + [1][1][2][1][RTW89_THAILAND][32] = 40, [1][1][2][1][RTW89_FCC][36] = 68, [1][1][2][1][RTW89_ETSI][36] = 127, [1][1][2][1][RTW89_MKK][36] = 66, @@ -36116,18 +36506,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][36] = 127, [1][1][2][1][RTW89_CHILE][36] = 66, [1][1][2][1][RTW89_QATAR][36] = 127, + [1][1][2][1][RTW89_THAILAND][36] = 127, [1][1][2][1][RTW89_FCC][39] = 68, [1][1][2][1][RTW89_ETSI][39] = 6, [1][1][2][1][RTW89_MKK][39] = 127, [1][1][2][1][RTW89_IC][39] = 68, [1][1][2][1][RTW89_KCC][39] = 56, [1][1][2][1][RTW89_ACMA][39] = 66, - [1][1][2][1][RTW89_CN][39] = 60, + [1][1][2][1][RTW89_CN][39] = 52, [1][1][2][1][RTW89_UK][39] = 40, [1][1][2][1][RTW89_MEXICO][39] = 68, [1][1][2][1][RTW89_UKRAINE][39] = 6, [1][1][2][1][RTW89_CHILE][39] = 60, [1][1][2][1][RTW89_QATAR][39] = 6, + [1][1][2][1][RTW89_THAILAND][39] = 6, [1][1][2][1][RTW89_FCC][43] = 68, [1][1][2][1][RTW89_ETSI][43] = 6, [1][1][2][1][RTW89_MKK][43] = 127, @@ 
-36140,6 +36532,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][43] = 6, [1][1][2][1][RTW89_CHILE][43] = 60, [1][1][2][1][RTW89_QATAR][43] = 6, + [1][1][2][1][RTW89_THAILAND][43] = 6, [1][1][2][1][RTW89_FCC][47] = 62, [1][1][2][1][RTW89_ETSI][47] = 127, [1][1][2][1][RTW89_MKK][47] = 127, @@ -36152,6 +36545,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][47] = 127, [1][1][2][1][RTW89_CHILE][47] = 127, [1][1][2][1][RTW89_QATAR][47] = 127, + [1][1][2][1][RTW89_THAILAND][47] = 127, [1][1][2][1][RTW89_FCC][51] = 60, [1][1][2][1][RTW89_ETSI][51] = 127, [1][1][2][1][RTW89_MKK][51] = 127, @@ -36164,6 +36558,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][51] = 127, [1][1][2][1][RTW89_CHILE][51] = 127, [1][1][2][1][RTW89_QATAR][51] = 127, + [1][1][2][1][RTW89_THAILAND][51] = 127, [2][0][2][0][RTW89_FCC][3] = 58, [2][0][2][0][RTW89_ETSI][3] = 60, [2][0][2][0][RTW89_MKK][3] = 60, @@ -36176,6 +36571,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][3] = 54, [2][0][2][0][RTW89_CHILE][3] = 58, [2][0][2][0][RTW89_QATAR][3] = 60, + [2][0][2][0][RTW89_THAILAND][3] = 60, [2][0][2][0][RTW89_FCC][11] = 50, [2][0][2][0][RTW89_ETSI][11] = 60, [2][0][2][0][RTW89_MKK][11] = 60, @@ -36188,6 +36584,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][11] = 54, [2][0][2][0][RTW89_CHILE][11] = 50, [2][0][2][0][RTW89_QATAR][11] = 60, + [2][0][2][0][RTW89_THAILAND][11] = 60, [2][0][2][0][RTW89_FCC][18] = 60, [2][0][2][0][RTW89_ETSI][18] = 60, [2][0][2][0][RTW89_MKK][18] = 60, @@ -36200,6 +36597,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][18] = 54, [2][0][2][0][RTW89_CHILE][18] = 60, [2][0][2][0][RTW89_QATAR][18] = 60, + [2][0][2][0][RTW89_THAILAND][18] = 60, [2][0][2][0][RTW89_FCC][26] = 62, [2][0][2][0][RTW89_ETSI][26] = 60, [2][0][2][0][RTW89_MKK][26] = 60, @@ -36212,6 +36610,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][26] = 54, [2][0][2][0][RTW89_CHILE][26] = 60, [2][0][2][0][RTW89_QATAR][26] = 60, + [2][0][2][0][RTW89_THAILAND][26] = 60, [2][0][2][0][RTW89_FCC][34] = 62, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 60, @@ -36224,18 +36623,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][34] = 127, [2][0][2][0][RTW89_CHILE][34] = 60, [2][0][2][0][RTW89_QATAR][34] = 127, + [2][0][2][0][RTW89_THAILAND][34] = 127, [2][0][2][0][RTW89_FCC][41] = 62, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, [2][0][2][0][RTW89_IC][41] = 62, [2][0][2][0][RTW89_KCC][41] = 58, [2][0][2][0][RTW89_ACMA][41] = 60, - [2][0][2][0][RTW89_CN][41] = 62, + [2][0][2][0][RTW89_CN][41] = 42, [2][0][2][0][RTW89_UK][41] = 60, [2][0][2][0][RTW89_MEXICO][41] = 62, [2][0][2][0][RTW89_UKRAINE][41] = 30, [2][0][2][0][RTW89_CHILE][41] = 60, [2][0][2][0][RTW89_QATAR][41] = 30, + [2][0][2][0][RTW89_THAILAND][41] = 30, [2][0][2][0][RTW89_FCC][49] = 62, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, @@ -36248,6 +36649,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][49] = 127, [2][0][2][0][RTW89_CHILE][49] = 127, [2][0][2][0][RTW89_QATAR][49] = 127, + [2][0][2][0][RTW89_THAILAND][49] = 127, [2][1][2][0][RTW89_FCC][3] = 48, 
[2][1][2][0][RTW89_ETSI][3] = 54, [2][1][2][0][RTW89_MKK][3] = 56, @@ -36260,18 +36662,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][3] = 42, [2][1][2][0][RTW89_CHILE][3] = 46, [2][1][2][0][RTW89_QATAR][3] = 54, + [2][1][2][0][RTW89_THAILAND][3] = 54, [2][1][2][0][RTW89_FCC][11] = 38, [2][1][2][0][RTW89_ETSI][11] = 54, [2][1][2][0][RTW89_MKK][11] = 54, [2][1][2][0][RTW89_IC][11] = 38, [2][1][2][0][RTW89_KCC][11] = 52, [2][1][2][0][RTW89_ACMA][11] = 54, - [2][1][2][0][RTW89_CN][11] = 52, + [2][1][2][0][RTW89_CN][11] = 50, [2][1][2][0][RTW89_UK][11] = 54, [2][1][2][0][RTW89_MEXICO][11] = 38, [2][1][2][0][RTW89_UKRAINE][11] = 42, [2][1][2][0][RTW89_CHILE][11] = 38, [2][1][2][0][RTW89_QATAR][11] = 54, + [2][1][2][0][RTW89_THAILAND][11] = 54, [2][1][2][0][RTW89_FCC][18] = 50, [2][1][2][0][RTW89_ETSI][18] = 54, [2][1][2][0][RTW89_MKK][18] = 60, @@ -36284,6 +36688,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][18] = 42, [2][1][2][0][RTW89_CHILE][18] = 50, [2][1][2][0][RTW89_QATAR][18] = 54, + [2][1][2][0][RTW89_THAILAND][18] = 54, [2][1][2][0][RTW89_FCC][26] = 52, [2][1][2][0][RTW89_ETSI][26] = 54, [2][1][2][0][RTW89_MKK][26] = 56, @@ -36296,6 +36701,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][26] = 42, [2][1][2][0][RTW89_CHILE][26] = 52, [2][1][2][0][RTW89_QATAR][26] = 54, + [2][1][2][0][RTW89_THAILAND][26] = 54, [2][1][2][0][RTW89_FCC][34] = 62, [2][1][2][0][RTW89_ETSI][34] = 127, [2][1][2][0][RTW89_MKK][34] = 60, @@ -36308,18 +36714,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][34] = 127, [2][1][2][0][RTW89_CHILE][34] = 60, [2][1][2][0][RTW89_QATAR][34] = 127, + [2][1][2][0][RTW89_THAILAND][34] = 127, [2][1][2][0][RTW89_FCC][41] = 60, [2][1][2][0][RTW89_ETSI][41] = 18, [2][1][2][0][RTW89_MKK][41] = 127, [2][1][2][0][RTW89_IC][41] = 60, [2][1][2][0][RTW89_KCC][41] = 50, [2][1][2][0][RTW89_ACMA][41] = 58, - [2][1][2][0][RTW89_CN][41] = 62, + [2][1][2][0][RTW89_CN][41] = 42, [2][1][2][0][RTW89_UK][41] = 52, [2][1][2][0][RTW89_MEXICO][41] = 60, [2][1][2][0][RTW89_UKRAINE][41] = 18, [2][1][2][0][RTW89_CHILE][41] = 58, [2][1][2][0][RTW89_QATAR][41] = 18, + [2][1][2][0][RTW89_THAILAND][41] = 18, [2][1][2][0][RTW89_FCC][49] = 62, [2][1][2][0][RTW89_ETSI][49] = 127, [2][1][2][0][RTW89_MKK][49] = 127, @@ -36332,6 +36740,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][49] = 127, [2][1][2][0][RTW89_CHILE][49] = 127, [2][1][2][0][RTW89_QATAR][49] = 127, + [2][1][2][0][RTW89_THAILAND][49] = 127, [2][1][2][1][RTW89_FCC][3] = 48, [2][1][2][1][RTW89_ETSI][3] = 40, [2][1][2][1][RTW89_MKK][3] = 56, @@ -36344,6 +36753,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][3] = 30, [2][1][2][1][RTW89_CHILE][3] = 46, [2][1][2][1][RTW89_QATAR][3] = 40, + [2][1][2][1][RTW89_THAILAND][3] = 40, [2][1][2][1][RTW89_FCC][11] = 38, [2][1][2][1][RTW89_ETSI][11] = 40, [2][1][2][1][RTW89_MKK][11] = 54, @@ -36356,6 +36766,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][11] = 30, [2][1][2][1][RTW89_CHILE][11] = 38, [2][1][2][1][RTW89_QATAR][11] = 40, + [2][1][2][1][RTW89_THAILAND][11] = 40, [2][1][2][1][RTW89_FCC][18] = 50, [2][1][2][1][RTW89_ETSI][18] = 40, [2][1][2][1][RTW89_MKK][18] = 60, @@ -36368,6 +36779,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][18] = 30, [2][1][2][1][RTW89_CHILE][18] = 50, [2][1][2][1][RTW89_QATAR][18] = 40, + [2][1][2][1][RTW89_THAILAND][18] = 40, [2][1][2][1][RTW89_FCC][26] = 52, [2][1][2][1][RTW89_ETSI][26] = 42, [2][1][2][1][RTW89_MKK][26] = 56, @@ -36380,6 +36792,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][26] = 30, [2][1][2][1][RTW89_CHILE][26] = 52, [2][1][2][1][RTW89_QATAR][26] = 42, + [2][1][2][1][RTW89_THAILAND][26] = 42, [2][1][2][1][RTW89_FCC][34] = 62, [2][1][2][1][RTW89_ETSI][34] = 127, [2][1][2][1][RTW89_MKK][34] = 60, @@ -36392,18 +36805,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][34] = 127, [2][1][2][1][RTW89_CHILE][34] = 60, [2][1][2][1][RTW89_QATAR][34] = 127, + [2][1][2][1][RTW89_THAILAND][34] = 127, [2][1][2][1][RTW89_FCC][41] = 60, [2][1][2][1][RTW89_ETSI][41] = 6, [2][1][2][1][RTW89_MKK][41] = 127, [2][1][2][1][RTW89_IC][41] = 60, [2][1][2][1][RTW89_KCC][41] = 50, [2][1][2][1][RTW89_ACMA][41] = 58, - [2][1][2][1][RTW89_CN][41] = 40, + [2][1][2][1][RTW89_CN][41] = 36, [2][1][2][1][RTW89_UK][41] = 40, [2][1][2][1][RTW89_MEXICO][41] = 60, [2][1][2][1][RTW89_UKRAINE][41] = 6, [2][1][2][1][RTW89_CHILE][41] = 58, [2][1][2][1][RTW89_QATAR][41] = 6, + [2][1][2][1][RTW89_THAILAND][41] = 6, [2][1][2][1][RTW89_FCC][49] = 62, [2][1][2][1][RTW89_ETSI][49] = 127, [2][1][2][1][RTW89_MKK][49] = 127, @@ -36416,6 +36831,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][49] = 127, [2][1][2][1][RTW89_CHILE][49] = 127, [2][1][2][1][RTW89_QATAR][49] = 127, + [2][1][2][1][RTW89_THAILAND][49] = 127, [3][0][2][0][RTW89_FCC][7] = 40, [3][0][2][0][RTW89_ETSI][7] = 50, [3][0][2][0][RTW89_MKK][7] = 50, @@ -36428,18 +36844,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_UKRAINE][7] = 50, [3][0][2][0][RTW89_CHILE][7] = 40, [3][0][2][0][RTW89_QATAR][7] = 50, + [3][0][2][0][RTW89_THAILAND][7] = 50, [3][0][2][0][RTW89_FCC][22] = 42, [3][0][2][0][RTW89_ETSI][22] = 50, [3][0][2][0][RTW89_MKK][22] = 50, [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 50, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 66, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_MEXICO][22] = 127, [3][0][2][0][RTW89_UKRAINE][22] = 50, [3][0][2][0][RTW89_CHILE][22] = 42, [3][0][2][0][RTW89_QATAR][22] = 50, + [3][0][2][0][RTW89_THAILAND][22] = 50, [3][0][2][0][RTW89_FCC][45] = 52, [3][0][2][0][RTW89_ETSI][45] = 127, [3][0][2][0][RTW89_MKK][45] = 127, @@ -36452,6 +36870,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_UKRAINE][45] = 127, [3][0][2][0][RTW89_CHILE][45] = 127, [3][0][2][0][RTW89_QATAR][45] = 127, + [3][0][2][0][RTW89_THAILAND][45] = 127, [3][1][2][0][RTW89_FCC][7] = 32, [3][1][2][0][RTW89_ETSI][7] = 50, [3][1][2][0][RTW89_MKK][7] = 36, @@ -36464,18 +36883,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_UKRAINE][7] = 50, [3][1][2][0][RTW89_CHILE][7] = 32, [3][1][2][0][RTW89_QATAR][7] = 50, + [3][1][2][0][RTW89_THAILAND][7] = 50, [3][1][2][0][RTW89_FCC][22] = 36, [3][1][2][0][RTW89_ETSI][22] = 50, [3][1][2][0][RTW89_MKK][22] = 48, [3][1][2][0][RTW89_IC][22] = 127, [3][1][2][0][RTW89_KCC][22] = 50, [3][1][2][0][RTW89_ACMA][22] = 127, - [3][1][2][0][RTW89_CN][22] = 54, + [3][1][2][0][RTW89_CN][22] = 127, 
[3][1][2][0][RTW89_UK][22] = 127, [3][1][2][0][RTW89_MEXICO][22] = 127, [3][1][2][0][RTW89_UKRAINE][22] = 50, [3][1][2][0][RTW89_CHILE][22] = 36, [3][1][2][0][RTW89_QATAR][22] = 50, + [3][1][2][0][RTW89_THAILAND][22] = 50, [3][1][2][0][RTW89_FCC][45] = 46, [3][1][2][0][RTW89_ETSI][45] = 127, [3][1][2][0][RTW89_MKK][45] = 127, @@ -36488,6 +36909,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_UKRAINE][45] = 127, [3][1][2][0][RTW89_CHILE][45] = 127, [3][1][2][0][RTW89_QATAR][45] = 127, + [3][1][2][0][RTW89_THAILAND][45] = 127, [3][1][2][1][RTW89_FCC][7] = 32, [3][1][2][1][RTW89_ETSI][7] = 42, [3][1][2][1][RTW89_MKK][7] = 36, @@ -36500,18 +36922,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_UKRAINE][7] = 42, [3][1][2][1][RTW89_CHILE][7] = 32, [3][1][2][1][RTW89_QATAR][7] = 42, + [3][1][2][1][RTW89_THAILAND][7] = 42, [3][1][2][1][RTW89_FCC][22] = 36, [3][1][2][1][RTW89_ETSI][22] = 42, [3][1][2][1][RTW89_MKK][22] = 48, [3][1][2][1][RTW89_IC][22] = 127, [3][1][2][1][RTW89_KCC][22] = 50, [3][1][2][1][RTW89_ACMA][22] = 127, - [3][1][2][1][RTW89_CN][22] = 42, + [3][1][2][1][RTW89_CN][22] = 127, [3][1][2][1][RTW89_UK][22] = 127, [3][1][2][1][RTW89_MEXICO][22] = 127, [3][1][2][1][RTW89_UKRAINE][22] = 42, [3][1][2][1][RTW89_CHILE][22] = 36, [3][1][2][1][RTW89_QATAR][22] = 42, + [3][1][2][1][RTW89_THAILAND][22] = 42, [3][1][2][1][RTW89_FCC][45] = 46, [3][1][2][1][RTW89_ETSI][45] = 127, [3][1][2][1][RTW89_MKK][45] = 127, @@ -36524,6 +36948,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_UKRAINE][45] = 127, [3][1][2][1][RTW89_CHILE][45] = 127, [3][1][2][1][RTW89_QATAR][45] = 127, + [3][1][2][1][RTW89_THAILAND][45] = 127, }; static @@ -36605,19 +37030,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][2][44] = 70, [0][0][1][0][RTW89_WW][0][45] = 22, [0][0][1][0][RTW89_WW][1][45] = 22, - [0][0][1][0][RTW89_WW][2][45] = 0, + [0][0][1][0][RTW89_WW][2][45] = 70, [0][0][1][0][RTW89_WW][0][47] = 22, [0][0][1][0][RTW89_WW][1][47] = 22, - [0][0][1][0][RTW89_WW][2][47] = 0, + [0][0][1][0][RTW89_WW][2][47] = 70, [0][0][1][0][RTW89_WW][0][49] = 24, [0][0][1][0][RTW89_WW][1][49] = 24, - [0][0][1][0][RTW89_WW][2][49] = 0, + [0][0][1][0][RTW89_WW][2][49] = 70, [0][0][1][0][RTW89_WW][0][51] = 22, [0][0][1][0][RTW89_WW][1][51] = 22, - [0][0][1][0][RTW89_WW][2][51] = 0, + [0][0][1][0][RTW89_WW][2][51] = 70, [0][0][1][0][RTW89_WW][0][53] = 22, [0][0][1][0][RTW89_WW][1][53] = 22, - [0][0][1][0][RTW89_WW][2][53] = 0, + [0][0][1][0][RTW89_WW][2][53] = 70, [0][0][1][0][RTW89_WW][0][55] = 22, [0][0][1][0][RTW89_WW][1][55] = 22, [0][0][1][0][RTW89_WW][2][55] = 68, @@ -36797,19 +37222,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][2][44] = 68, [0][1][1][0][RTW89_WW][0][45] = -2, [0][1][1][0][RTW89_WW][1][45] = -2, - [0][1][1][0][RTW89_WW][2][45] = 0, + [0][1][1][0][RTW89_WW][2][45] = 70, [0][1][1][0][RTW89_WW][0][47] = -2, [0][1][1][0][RTW89_WW][1][47] = -2, - [0][1][1][0][RTW89_WW][2][47] = 0, + [0][1][1][0][RTW89_WW][2][47] = 68, [0][1][1][0][RTW89_WW][0][49] = -2, [0][1][1][0][RTW89_WW][1][49] = -2, - [0][1][1][0][RTW89_WW][2][49] = 0, + [0][1][1][0][RTW89_WW][2][49] = 68, [0][1][1][0][RTW89_WW][0][51] = -2, [0][1][1][0][RTW89_WW][1][51] = -2, - [0][1][1][0][RTW89_WW][2][51] = 0, + [0][1][1][0][RTW89_WW][2][51] = 68, [0][1][1][0][RTW89_WW][0][53] = -2, [0][1][1][0][RTW89_WW][1][53] = -2, - 
[0][1][1][0][RTW89_WW][2][53] = 0, + [0][1][1][0][RTW89_WW][2][53] = 68, [0][1][1][0][RTW89_WW][0][55] = -2, [0][1][1][0][RTW89_WW][1][55] = -2, [0][1][1][0][RTW89_WW][2][55] = 68, @@ -36989,19 +37414,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][2][44] = 70, [0][0][2][0][RTW89_WW][0][45] = 22, [0][0][2][0][RTW89_WW][1][45] = 22, - [0][0][2][0][RTW89_WW][2][45] = 0, + [0][0][2][0][RTW89_WW][2][45] = 70, [0][0][2][0][RTW89_WW][0][47] = 22, [0][0][2][0][RTW89_WW][1][47] = 22, - [0][0][2][0][RTW89_WW][2][47] = 0, + [0][0][2][0][RTW89_WW][2][47] = 70, [0][0][2][0][RTW89_WW][0][49] = 24, [0][0][2][0][RTW89_WW][1][49] = 24, - [0][0][2][0][RTW89_WW][2][49] = 0, + [0][0][2][0][RTW89_WW][2][49] = 70, [0][0][2][0][RTW89_WW][0][51] = 22, [0][0][2][0][RTW89_WW][1][51] = 22, - [0][0][2][0][RTW89_WW][2][51] = 0, + [0][0][2][0][RTW89_WW][2][51] = 70, [0][0][2][0][RTW89_WW][0][53] = 22, [0][0][2][0][RTW89_WW][1][53] = 22, - [0][0][2][0][RTW89_WW][2][53] = 0, + [0][0][2][0][RTW89_WW][2][53] = 70, [0][0][2][0][RTW89_WW][0][55] = 22, [0][0][2][0][RTW89_WW][1][55] = 22, [0][0][2][0][RTW89_WW][2][55] = 68, @@ -37181,19 +37606,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][2][44] = 68, [0][1][2][0][RTW89_WW][0][45] = -2, [0][1][2][0][RTW89_WW][1][45] = -2, - [0][1][2][0][RTW89_WW][2][45] = 0, + [0][1][2][0][RTW89_WW][2][45] = 70, [0][1][2][0][RTW89_WW][0][47] = -2, [0][1][2][0][RTW89_WW][1][47] = -2, - [0][1][2][0][RTW89_WW][2][47] = 0, + [0][1][2][0][RTW89_WW][2][47] = 68, [0][1][2][0][RTW89_WW][0][49] = -2, [0][1][2][0][RTW89_WW][1][49] = -2, - [0][1][2][0][RTW89_WW][2][49] = 0, + [0][1][2][0][RTW89_WW][2][49] = 68, [0][1][2][0][RTW89_WW][0][51] = -2, [0][1][2][0][RTW89_WW][1][51] = -2, - [0][1][2][0][RTW89_WW][2][51] = 0, + [0][1][2][0][RTW89_WW][2][51] = 68, [0][1][2][0][RTW89_WW][0][53] = -2, [0][1][2][0][RTW89_WW][1][53] = -2, - [0][1][2][0][RTW89_WW][2][53] = 0, + [0][1][2][0][RTW89_WW][2][53] = 68, [0][1][2][0][RTW89_WW][0][55] = -2, [0][1][2][0][RTW89_WW][1][55] = -2, [0][1][2][0][RTW89_WW][2][55] = 68, @@ -37373,19 +37798,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][2][44] = 68, [0][1][2][1][RTW89_WW][0][45] = -2, [0][1][2][1][RTW89_WW][1][45] = -2, - [0][1][2][1][RTW89_WW][2][45] = 0, + [0][1][2][1][RTW89_WW][2][45] = 70, [0][1][2][1][RTW89_WW][0][47] = -2, [0][1][2][1][RTW89_WW][1][47] = -2, - [0][1][2][1][RTW89_WW][2][47] = 0, + [0][1][2][1][RTW89_WW][2][47] = 68, [0][1][2][1][RTW89_WW][0][49] = -2, [0][1][2][1][RTW89_WW][1][49] = -2, - [0][1][2][1][RTW89_WW][2][49] = 0, + [0][1][2][1][RTW89_WW][2][49] = 68, [0][1][2][1][RTW89_WW][0][51] = -2, [0][1][2][1][RTW89_WW][1][51] = -2, - [0][1][2][1][RTW89_WW][2][51] = 0, + [0][1][2][1][RTW89_WW][2][51] = 68, [0][1][2][1][RTW89_WW][0][53] = -2, [0][1][2][1][RTW89_WW][1][53] = -2, - [0][1][2][1][RTW89_WW][2][53] = 0, + [0][1][2][1][RTW89_WW][2][53] = 68, [0][1][2][1][RTW89_WW][0][55] = -2, [0][1][2][1][RTW89_WW][1][55] = -2, [0][1][2][1][RTW89_WW][2][55] = 68, @@ -37529,10 +37954,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][2][43] = 70, [1][0][2][0][RTW89_WW][0][46] = 34, [1][0][2][0][RTW89_WW][1][46] = 34, - [1][0][2][0][RTW89_WW][2][46] = 0, + [1][0][2][0][RTW89_WW][2][46] = 68, [1][0][2][0][RTW89_WW][0][50] = 34, [1][0][2][0][RTW89_WW][1][50] = 34, - [1][0][2][0][RTW89_WW][2][50] = 0, + [1][0][2][0][RTW89_WW][2][50] = 68, [1][0][2][0][RTW89_WW][0][54] = 36, 
[1][0][2][0][RTW89_WW][1][54] = 36, [1][0][2][0][RTW89_WW][2][54] = 0, @@ -37625,10 +38050,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_WW][2][43] = 70, [1][1][2][0][RTW89_WW][0][46] = 12, [1][1][2][0][RTW89_WW][1][46] = 12, - [1][1][2][0][RTW89_WW][2][46] = 0, + [1][1][2][0][RTW89_WW][2][46] = 68, [1][1][2][0][RTW89_WW][0][50] = 12, [1][1][2][0][RTW89_WW][1][50] = 12, - [1][1][2][0][RTW89_WW][2][50] = 0, + [1][1][2][0][RTW89_WW][2][50] = 68, [1][1][2][0][RTW89_WW][0][54] = 10, [1][1][2][0][RTW89_WW][1][54] = 10, [1][1][2][0][RTW89_WW][2][54] = 0, @@ -37721,10 +38146,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][2][43] = 70, [1][1][2][1][RTW89_WW][0][46] = 12, [1][1][2][1][RTW89_WW][1][46] = 12, - [1][1][2][1][RTW89_WW][2][46] = 0, + [1][1][2][1][RTW89_WW][2][46] = 68, [1][1][2][1][RTW89_WW][0][50] = 12, [1][1][2][1][RTW89_WW][1][50] = 12, - [1][1][2][1][RTW89_WW][2][50] = 0, + [1][1][2][1][RTW89_WW][2][50] = 68, [1][1][2][1][RTW89_WW][0][54] = 10, [1][1][2][1][RTW89_WW][1][54] = 10, [1][1][2][1][RTW89_WW][2][54] = 0, @@ -37799,10 +38224,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_WW][2][41] = 60, [2][0][2][0][RTW89_WW][0][48] = 46, [2][0][2][0][RTW89_WW][1][48] = 46, - [2][0][2][0][RTW89_WW][2][48] = 0, + [2][0][2][0][RTW89_WW][2][48] = 60, [2][0][2][0][RTW89_WW][0][56] = 46, [2][0][2][0][RTW89_WW][1][56] = 46, - [2][0][2][0][RTW89_WW][2][56] = 0, + [2][0][2][0][RTW89_WW][2][56] = 58, [2][0][2][0][RTW89_WW][0][63] = 46, [2][0][2][0][RTW89_WW][1][63] = 46, [2][0][2][0][RTW89_WW][2][63] = 58, @@ -37847,10 +38272,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_WW][2][41] = 60, [2][1][2][0][RTW89_WW][0][48] = 22, [2][1][2][0][RTW89_WW][1][48] = 22, - [2][1][2][0][RTW89_WW][2][48] = 0, + [2][1][2][0][RTW89_WW][2][48] = 60, [2][1][2][0][RTW89_WW][0][56] = 20, [2][1][2][0][RTW89_WW][1][56] = 20, - [2][1][2][0][RTW89_WW][2][56] = 0, + [2][1][2][0][RTW89_WW][2][56] = 56, [2][1][2][0][RTW89_WW][0][63] = 22, [2][1][2][0][RTW89_WW][1][63] = 22, [2][1][2][0][RTW89_WW][2][63] = 58, @@ -37895,10 +38320,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_WW][2][41] = 60, [2][1][2][1][RTW89_WW][0][48] = 22, [2][1][2][1][RTW89_WW][1][48] = 22, - [2][1][2][1][RTW89_WW][2][48] = 0, + [2][1][2][1][RTW89_WW][2][48] = 60, [2][1][2][1][RTW89_WW][0][56] = 20, [2][1][2][1][RTW89_WW][1][56] = 20, - [2][1][2][1][RTW89_WW][2][56] = 0, + [2][1][2][1][RTW89_WW][2][56] = 56, [2][1][2][1][RTW89_WW][0][63] = 22, [2][1][2][1][RTW89_WW][1][63] = 22, [2][1][2][1][RTW89_WW][2][63] = 58, @@ -37934,7 +38359,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_WW][2][37] = 52, [3][0][2][0][RTW89_WW][0][52] = 54, [3][0][2][0][RTW89_WW][1][52] = 54, - [3][0][2][0][RTW89_WW][2][52] = 0, + [3][0][2][0][RTW89_WW][2][52] = 56, [3][0][2][0][RTW89_WW][0][67] = 54, [3][0][2][0][RTW89_WW][1][67] = 54, [3][0][2][0][RTW89_WW][2][67] = 54, @@ -37958,7 +38383,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_WW][2][37] = 52, [3][1][2][0][RTW89_WW][0][52] = 30, [3][1][2][0][RTW89_WW][1][52] = 30, - [3][1][2][0][RTW89_WW][2][52] = 0, + [3][1][2][0][RTW89_WW][2][52] = 56, [3][1][2][0][RTW89_WW][0][67] = 32, [3][1][2][0][RTW89_WW][1][67] = 32, [3][1][2][0][RTW89_WW][2][67] = 54, @@ -37982,7 +38407,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_WW][2][37] = 52, [3][1][2][1][RTW89_WW][0][52] = 30, [3][1][2][1][RTW89_WW][1][52] = 30, - [3][1][2][1][RTW89_WW][2][52] = 0, + [3][1][2][1][RTW89_WW][2][52] = 56, [3][1][2][1][RTW89_WW][0][67] = 32, [3][1][2][1][RTW89_WW][1][67] = 32, [3][1][2][1][RTW89_WW][2][67] = 54, @@ -38002,6 +38427,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][0] = 66, [0][0][1][0][RTW89_MKK][0][0] = 26, [0][0][1][0][RTW89_IC][1][0] = 24, + [0][0][1][0][RTW89_IC][2][0] = 56, [0][0][1][0][RTW89_KCC][1][0] = 24, [0][0][1][0][RTW89_KCC][0][0] = 24, [0][0][1][0][RTW89_ACMA][1][0] = 66, @@ -38011,6 +38437,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][0] = 28, [0][0][1][0][RTW89_UK][1][0] = 66, [0][0][1][0][RTW89_UK][0][0] = 28, + [0][0][1][0][RTW89_THAILAND][1][0] = 56, + [0][0][1][0][RTW89_THAILAND][0][0] = 24, [0][0][1][0][RTW89_FCC][1][2] = 22, [0][0][1][0][RTW89_FCC][2][2] = 56, [0][0][1][0][RTW89_ETSI][1][2] = 66, @@ -38018,6 +38446,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][2] = 66, [0][0][1][0][RTW89_MKK][0][2] = 26, [0][0][1][0][RTW89_IC][1][2] = 22, + [0][0][1][0][RTW89_IC][2][2] = 56, [0][0][1][0][RTW89_KCC][1][2] = 24, [0][0][1][0][RTW89_KCC][0][2] = 24, [0][0][1][0][RTW89_ACMA][1][2] = 66, @@ -38027,6 +38456,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][2] = 28, [0][0][1][0][RTW89_UK][1][2] = 66, [0][0][1][0][RTW89_UK][0][2] = 28, + [0][0][1][0][RTW89_THAILAND][1][2] = 56, + [0][0][1][0][RTW89_THAILAND][0][2] = 22, [0][0][1][0][RTW89_FCC][1][4] = 22, [0][0][1][0][RTW89_FCC][2][4] = 56, [0][0][1][0][RTW89_ETSI][1][4] = 66, @@ -38034,6 +38465,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][4] = 66, [0][0][1][0][RTW89_MKK][0][4] = 26, [0][0][1][0][RTW89_IC][1][4] = 22, + [0][0][1][0][RTW89_IC][2][4] = 56, [0][0][1][0][RTW89_KCC][1][4] = 24, [0][0][1][0][RTW89_KCC][0][4] = 24, [0][0][1][0][RTW89_ACMA][1][4] = 66, @@ -38043,6 +38475,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][4] = 28, [0][0][1][0][RTW89_UK][1][4] = 66, [0][0][1][0][RTW89_UK][0][4] = 28, + [0][0][1][0][RTW89_THAILAND][1][4] = 56, + [0][0][1][0][RTW89_THAILAND][0][4] = 22, [0][0][1][0][RTW89_FCC][1][6] = 22, [0][0][1][0][RTW89_FCC][2][6] = 56, [0][0][1][0][RTW89_ETSI][1][6] = 66, @@ -38050,6 +38484,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][6] = 66, [0][0][1][0][RTW89_MKK][0][6] = 26, [0][0][1][0][RTW89_IC][1][6] = 22, + [0][0][1][0][RTW89_IC][2][6] = 56, [0][0][1][0][RTW89_KCC][1][6] = 24, [0][0][1][0][RTW89_KCC][0][6] = 24, [0][0][1][0][RTW89_ACMA][1][6] = 66, @@ -38059,6 +38494,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][6] = 28, [0][0][1][0][RTW89_UK][1][6] = 66, [0][0][1][0][RTW89_UK][0][6] = 28, + [0][0][1][0][RTW89_THAILAND][1][6] = 56, + [0][0][1][0][RTW89_THAILAND][0][6] = 22, [0][0][1][0][RTW89_FCC][1][8] = 22, [0][0][1][0][RTW89_FCC][2][8] = 56, [0][0][1][0][RTW89_ETSI][1][8] = 66, @@ -38066,6 +38503,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][8] = 66, [0][0][1][0][RTW89_MKK][0][8] = 26, [0][0][1][0][RTW89_IC][1][8] = 22, + [0][0][1][0][RTW89_IC][2][8] = 56, 
[0][0][1][0][RTW89_KCC][1][8] = 24, [0][0][1][0][RTW89_KCC][0][8] = 24, [0][0][1][0][RTW89_ACMA][1][8] = 66, @@ -38075,6 +38513,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][8] = 28, [0][0][1][0][RTW89_UK][1][8] = 66, [0][0][1][0][RTW89_UK][0][8] = 28, + [0][0][1][0][RTW89_THAILAND][1][8] = 56, + [0][0][1][0][RTW89_THAILAND][0][8] = 22, [0][0][1][0][RTW89_FCC][1][10] = 22, [0][0][1][0][RTW89_FCC][2][10] = 56, [0][0][1][0][RTW89_ETSI][1][10] = 66, @@ -38082,6 +38522,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][10] = 66, [0][0][1][0][RTW89_MKK][0][10] = 26, [0][0][1][0][RTW89_IC][1][10] = 22, + [0][0][1][0][RTW89_IC][2][10] = 56, [0][0][1][0][RTW89_KCC][1][10] = 24, [0][0][1][0][RTW89_KCC][0][10] = 24, [0][0][1][0][RTW89_ACMA][1][10] = 66, @@ -38091,6 +38532,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][10] = 28, [0][0][1][0][RTW89_UK][1][10] = 66, [0][0][1][0][RTW89_UK][0][10] = 28, + [0][0][1][0][RTW89_THAILAND][1][10] = 56, + [0][0][1][0][RTW89_THAILAND][0][10] = 22, [0][0][1][0][RTW89_FCC][1][12] = 22, [0][0][1][0][RTW89_FCC][2][12] = 56, [0][0][1][0][RTW89_ETSI][1][12] = 66, @@ -38098,6 +38541,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][12] = 66, [0][0][1][0][RTW89_MKK][0][12] = 26, [0][0][1][0][RTW89_IC][1][12] = 22, + [0][0][1][0][RTW89_IC][2][12] = 56, [0][0][1][0][RTW89_KCC][1][12] = 24, [0][0][1][0][RTW89_KCC][0][12] = 24, [0][0][1][0][RTW89_ACMA][1][12] = 66, @@ -38107,6 +38551,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][12] = 28, [0][0][1][0][RTW89_UK][1][12] = 66, [0][0][1][0][RTW89_UK][0][12] = 28, + [0][0][1][0][RTW89_THAILAND][1][12] = 56, + [0][0][1][0][RTW89_THAILAND][0][12] = 22, [0][0][1][0][RTW89_FCC][1][14] = 22, [0][0][1][0][RTW89_FCC][2][14] = 56, [0][0][1][0][RTW89_ETSI][1][14] = 66, @@ -38114,6 +38560,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][14] = 66, [0][0][1][0][RTW89_MKK][0][14] = 26, [0][0][1][0][RTW89_IC][1][14] = 22, + [0][0][1][0][RTW89_IC][2][14] = 56, [0][0][1][0][RTW89_KCC][1][14] = 24, [0][0][1][0][RTW89_KCC][0][14] = 24, [0][0][1][0][RTW89_ACMA][1][14] = 66, @@ -38123,6 +38570,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][14] = 28, [0][0][1][0][RTW89_UK][1][14] = 66, [0][0][1][0][RTW89_UK][0][14] = 28, + [0][0][1][0][RTW89_THAILAND][1][14] = 56, + [0][0][1][0][RTW89_THAILAND][0][14] = 22, [0][0][1][0][RTW89_FCC][1][15] = 22, [0][0][1][0][RTW89_FCC][2][15] = 56, [0][0][1][0][RTW89_ETSI][1][15] = 66, @@ -38130,6 +38579,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][15] = 66, [0][0][1][0][RTW89_MKK][0][15] = 26, [0][0][1][0][RTW89_IC][1][15] = 22, + [0][0][1][0][RTW89_IC][2][15] = 56, [0][0][1][0][RTW89_KCC][1][15] = 24, [0][0][1][0][RTW89_KCC][0][15] = 24, [0][0][1][0][RTW89_ACMA][1][15] = 66, @@ -38139,6 +38589,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][15] = 28, [0][0][1][0][RTW89_UK][1][15] = 66, [0][0][1][0][RTW89_UK][0][15] = 28, + [0][0][1][0][RTW89_THAILAND][1][15] = 56, + [0][0][1][0][RTW89_THAILAND][0][15] = 22, [0][0][1][0][RTW89_FCC][1][17] = 22, [0][0][1][0][RTW89_FCC][2][17] = 56, [0][0][1][0][RTW89_ETSI][1][17] = 66, @@ -38146,6 +38598,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][17] = 66, [0][0][1][0][RTW89_MKK][0][17] = 26, [0][0][1][0][RTW89_IC][1][17] = 22, + [0][0][1][0][RTW89_IC][2][17] = 56, [0][0][1][0][RTW89_KCC][1][17] = 24, [0][0][1][0][RTW89_KCC][0][17] = 24, [0][0][1][0][RTW89_ACMA][1][17] = 66, @@ -38155,6 +38608,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][17] = 28, [0][0][1][0][RTW89_UK][1][17] = 66, [0][0][1][0][RTW89_UK][0][17] = 28, + [0][0][1][0][RTW89_THAILAND][1][17] = 56, + [0][0][1][0][RTW89_THAILAND][0][17] = 22, [0][0][1][0][RTW89_FCC][1][19] = 22, [0][0][1][0][RTW89_FCC][2][19] = 56, [0][0][1][0][RTW89_ETSI][1][19] = 66, @@ -38162,6 +38617,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][19] = 66, [0][0][1][0][RTW89_MKK][0][19] = 26, [0][0][1][0][RTW89_IC][1][19] = 22, + [0][0][1][0][RTW89_IC][2][19] = 56, [0][0][1][0][RTW89_KCC][1][19] = 24, [0][0][1][0][RTW89_KCC][0][19] = 24, [0][0][1][0][RTW89_ACMA][1][19] = 66, @@ -38171,6 +38627,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][19] = 28, [0][0][1][0][RTW89_UK][1][19] = 66, [0][0][1][0][RTW89_UK][0][19] = 28, + [0][0][1][0][RTW89_THAILAND][1][19] = 56, + [0][0][1][0][RTW89_THAILAND][0][19] = 22, [0][0][1][0][RTW89_FCC][1][21] = 22, [0][0][1][0][RTW89_FCC][2][21] = 56, [0][0][1][0][RTW89_ETSI][1][21] = 66, @@ -38178,6 +38636,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][21] = 66, [0][0][1][0][RTW89_MKK][0][21] = 26, [0][0][1][0][RTW89_IC][1][21] = 22, + [0][0][1][0][RTW89_IC][2][21] = 56, [0][0][1][0][RTW89_KCC][1][21] = 24, [0][0][1][0][RTW89_KCC][0][21] = 24, [0][0][1][0][RTW89_ACMA][1][21] = 66, @@ -38187,6 +38646,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][21] = 28, [0][0][1][0][RTW89_UK][1][21] = 66, [0][0][1][0][RTW89_UK][0][21] = 28, + [0][0][1][0][RTW89_THAILAND][1][21] = 56, + [0][0][1][0][RTW89_THAILAND][0][21] = 22, [0][0][1][0][RTW89_FCC][1][23] = 22, [0][0][1][0][RTW89_FCC][2][23] = 70, [0][0][1][0][RTW89_ETSI][1][23] = 66, @@ -38194,6 +38655,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][23] = 66, [0][0][1][0][RTW89_MKK][0][23] = 26, [0][0][1][0][RTW89_IC][1][23] = 22, + [0][0][1][0][RTW89_IC][2][23] = 70, [0][0][1][0][RTW89_KCC][1][23] = 24, [0][0][1][0][RTW89_KCC][0][23] = 26, [0][0][1][0][RTW89_ACMA][1][23] = 66, @@ -38203,6 +38665,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][23] = 28, [0][0][1][0][RTW89_UK][1][23] = 66, [0][0][1][0][RTW89_UK][0][23] = 28, + [0][0][1][0][RTW89_THAILAND][1][23] = 66, + [0][0][1][0][RTW89_THAILAND][0][23] = 22, [0][0][1][0][RTW89_FCC][1][25] = 22, [0][0][1][0][RTW89_FCC][2][25] = 70, [0][0][1][0][RTW89_ETSI][1][25] = 66, @@ -38210,6 +38674,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][25] = 66, [0][0][1][0][RTW89_MKK][0][25] = 26, [0][0][1][0][RTW89_IC][1][25] = 22, + [0][0][1][0][RTW89_IC][2][25] = 70, [0][0][1][0][RTW89_KCC][1][25] = 24, [0][0][1][0][RTW89_KCC][0][25] = 26, [0][0][1][0][RTW89_ACMA][1][25] = 66, @@ -38219,6 +38684,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][25] = 28, [0][0][1][0][RTW89_UK][1][25] = 66, [0][0][1][0][RTW89_UK][0][25] = 28, + 
[0][0][1][0][RTW89_THAILAND][1][25] = 66, + [0][0][1][0][RTW89_THAILAND][0][25] = 22, [0][0][1][0][RTW89_FCC][1][27] = 22, [0][0][1][0][RTW89_FCC][2][27] = 70, [0][0][1][0][RTW89_ETSI][1][27] = 66, @@ -38226,6 +38693,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][27] = 66, [0][0][1][0][RTW89_MKK][0][27] = 26, [0][0][1][0][RTW89_IC][1][27] = 22, + [0][0][1][0][RTW89_IC][2][27] = 70, [0][0][1][0][RTW89_KCC][1][27] = 24, [0][0][1][0][RTW89_KCC][0][27] = 26, [0][0][1][0][RTW89_ACMA][1][27] = 66, @@ -38235,6 +38703,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][27] = 28, [0][0][1][0][RTW89_UK][1][27] = 66, [0][0][1][0][RTW89_UK][0][27] = 28, + [0][0][1][0][RTW89_THAILAND][1][27] = 66, + [0][0][1][0][RTW89_THAILAND][0][27] = 22, [0][0][1][0][RTW89_FCC][1][29] = 22, [0][0][1][0][RTW89_FCC][2][29] = 70, [0][0][1][0][RTW89_ETSI][1][29] = 66, @@ -38242,6 +38712,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][29] = 66, [0][0][1][0][RTW89_MKK][0][29] = 26, [0][0][1][0][RTW89_IC][1][29] = 22, + [0][0][1][0][RTW89_IC][2][29] = 70, [0][0][1][0][RTW89_KCC][1][29] = 24, [0][0][1][0][RTW89_KCC][0][29] = 26, [0][0][1][0][RTW89_ACMA][1][29] = 66, @@ -38251,6 +38722,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][29] = 28, [0][0][1][0][RTW89_UK][1][29] = 66, [0][0][1][0][RTW89_UK][0][29] = 28, + [0][0][1][0][RTW89_THAILAND][1][29] = 66, + [0][0][1][0][RTW89_THAILAND][0][29] = 22, [0][0][1][0][RTW89_FCC][1][30] = 22, [0][0][1][0][RTW89_FCC][2][30] = 70, [0][0][1][0][RTW89_ETSI][1][30] = 66, @@ -38258,6 +38731,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][30] = 66, [0][0][1][0][RTW89_MKK][0][30] = 26, [0][0][1][0][RTW89_IC][1][30] = 22, + [0][0][1][0][RTW89_IC][2][30] = 70, [0][0][1][0][RTW89_KCC][1][30] = 24, [0][0][1][0][RTW89_KCC][0][30] = 26, [0][0][1][0][RTW89_ACMA][1][30] = 66, @@ -38267,6 +38741,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][30] = 28, [0][0][1][0][RTW89_UK][1][30] = 66, [0][0][1][0][RTW89_UK][0][30] = 28, + [0][0][1][0][RTW89_THAILAND][1][30] = 66, + [0][0][1][0][RTW89_THAILAND][0][30] = 22, [0][0][1][0][RTW89_FCC][1][32] = 22, [0][0][1][0][RTW89_FCC][2][32] = 70, [0][0][1][0][RTW89_ETSI][1][32] = 66, @@ -38274,6 +38750,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][32] = 66, [0][0][1][0][RTW89_MKK][0][32] = 26, [0][0][1][0][RTW89_IC][1][32] = 22, + [0][0][1][0][RTW89_IC][2][32] = 70, [0][0][1][0][RTW89_KCC][1][32] = 24, [0][0][1][0][RTW89_KCC][0][32] = 26, [0][0][1][0][RTW89_ACMA][1][32] = 66, @@ -38283,6 +38760,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][32] = 28, [0][0][1][0][RTW89_UK][1][32] = 66, [0][0][1][0][RTW89_UK][0][32] = 28, + [0][0][1][0][RTW89_THAILAND][1][32] = 66, + [0][0][1][0][RTW89_THAILAND][0][32] = 22, [0][0][1][0][RTW89_FCC][1][34] = 22, [0][0][1][0][RTW89_FCC][2][34] = 70, [0][0][1][0][RTW89_ETSI][1][34] = 66, @@ -38290,6 +38769,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][34] = 66, [0][0][1][0][RTW89_MKK][0][34] = 26, [0][0][1][0][RTW89_IC][1][34] = 22, + [0][0][1][0][RTW89_IC][2][34] = 70, [0][0][1][0][RTW89_KCC][1][34] = 24, [0][0][1][0][RTW89_KCC][0][34] = 26, [0][0][1][0][RTW89_ACMA][1][34] = 66, @@ 
-38299,6 +38779,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][34] = 28, [0][0][1][0][RTW89_UK][1][34] = 66, [0][0][1][0][RTW89_UK][0][34] = 28, + [0][0][1][0][RTW89_THAILAND][1][34] = 66, + [0][0][1][0][RTW89_THAILAND][0][34] = 22, [0][0][1][0][RTW89_FCC][1][36] = 22, [0][0][1][0][RTW89_FCC][2][36] = 70, [0][0][1][0][RTW89_ETSI][1][36] = 66, @@ -38306,6 +38788,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][36] = 66, [0][0][1][0][RTW89_MKK][0][36] = 26, [0][0][1][0][RTW89_IC][1][36] = 22, + [0][0][1][0][RTW89_IC][2][36] = 70, [0][0][1][0][RTW89_KCC][1][36] = 24, [0][0][1][0][RTW89_KCC][0][36] = 26, [0][0][1][0][RTW89_ACMA][1][36] = 66, @@ -38315,6 +38798,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][36] = 28, [0][0][1][0][RTW89_UK][1][36] = 66, [0][0][1][0][RTW89_UK][0][36] = 28, + [0][0][1][0][RTW89_THAILAND][1][36] = 66, + [0][0][1][0][RTW89_THAILAND][0][36] = 22, [0][0][1][0][RTW89_FCC][1][38] = 22, [0][0][1][0][RTW89_FCC][2][38] = 70, [0][0][1][0][RTW89_ETSI][1][38] = 66, @@ -38322,6 +38807,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][38] = 66, [0][0][1][0][RTW89_MKK][0][38] = 26, [0][0][1][0][RTW89_IC][1][38] = 22, + [0][0][1][0][RTW89_IC][2][38] = 70, [0][0][1][0][RTW89_KCC][1][38] = 24, [0][0][1][0][RTW89_KCC][0][38] = 26, [0][0][1][0][RTW89_ACMA][1][38] = 66, @@ -38331,6 +38817,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][38] = 28, [0][0][1][0][RTW89_UK][1][38] = 66, [0][0][1][0][RTW89_UK][0][38] = 28, + [0][0][1][0][RTW89_THAILAND][1][38] = 66, + [0][0][1][0][RTW89_THAILAND][0][38] = 22, [0][0][1][0][RTW89_FCC][1][40] = 22, [0][0][1][0][RTW89_FCC][2][40] = 70, [0][0][1][0][RTW89_ETSI][1][40] = 66, @@ -38338,6 +38826,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][40] = 66, [0][0][1][0][RTW89_MKK][0][40] = 26, [0][0][1][0][RTW89_IC][1][40] = 22, + [0][0][1][0][RTW89_IC][2][40] = 70, [0][0][1][0][RTW89_KCC][1][40] = 24, [0][0][1][0][RTW89_KCC][0][40] = 26, [0][0][1][0][RTW89_ACMA][1][40] = 66, @@ -38347,6 +38836,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][40] = 28, [0][0][1][0][RTW89_UK][1][40] = 66, [0][0][1][0][RTW89_UK][0][40] = 28, + [0][0][1][0][RTW89_THAILAND][1][40] = 66, + [0][0][1][0][RTW89_THAILAND][0][40] = 22, [0][0][1][0][RTW89_FCC][1][42] = 22, [0][0][1][0][RTW89_FCC][2][42] = 70, [0][0][1][0][RTW89_ETSI][1][42] = 66, @@ -38354,6 +38845,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][42] = 66, [0][0][1][0][RTW89_MKK][0][42] = 26, [0][0][1][0][RTW89_IC][1][42] = 22, + [0][0][1][0][RTW89_IC][2][42] = 70, [0][0][1][0][RTW89_KCC][1][42] = 24, [0][0][1][0][RTW89_KCC][0][42] = 26, [0][0][1][0][RTW89_ACMA][1][42] = 66, @@ -38363,6 +38855,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][42] = 28, [0][0][1][0][RTW89_UK][1][42] = 66, [0][0][1][0][RTW89_UK][0][42] = 28, + [0][0][1][0][RTW89_THAILAND][1][42] = 66, + [0][0][1][0][RTW89_THAILAND][0][42] = 22, [0][0][1][0][RTW89_FCC][1][44] = 22, [0][0][1][0][RTW89_FCC][2][44] = 70, [0][0][1][0][RTW89_ETSI][1][44] = 66, @@ -38370,6 +38864,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][44] = 44, 
[0][0][1][0][RTW89_MKK][0][44] = 28, [0][0][1][0][RTW89_IC][1][44] = 22, + [0][0][1][0][RTW89_IC][2][44] = 70, [0][0][1][0][RTW89_KCC][1][44] = 24, [0][0][1][0][RTW89_KCC][0][44] = 26, [0][0][1][0][RTW89_ACMA][1][44] = 66, @@ -38379,6 +38874,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][44] = 30, [0][0][1][0][RTW89_UK][1][44] = 66, [0][0][1][0][RTW89_UK][0][44] = 30, + [0][0][1][0][RTW89_THAILAND][1][44] = 68, + [0][0][1][0][RTW89_THAILAND][0][44] = 22, [0][0][1][0][RTW89_FCC][1][45] = 22, [0][0][1][0][RTW89_FCC][2][45] = 127, [0][0][1][0][RTW89_ETSI][1][45] = 127, @@ -38386,6 +38883,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][45] = 127, [0][0][1][0][RTW89_MKK][0][45] = 127, [0][0][1][0][RTW89_IC][1][45] = 22, + [0][0][1][0][RTW89_IC][2][45] = 70, [0][0][1][0][RTW89_KCC][1][45] = 24, [0][0][1][0][RTW89_KCC][0][45] = 127, [0][0][1][0][RTW89_ACMA][1][45] = 127, @@ -38395,6 +38893,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][45] = 127, [0][0][1][0][RTW89_UK][1][45] = 127, [0][0][1][0][RTW89_UK][0][45] = 127, + [0][0][1][0][RTW89_THAILAND][1][45] = 127, + [0][0][1][0][RTW89_THAILAND][0][45] = 127, [0][0][1][0][RTW89_FCC][1][47] = 22, [0][0][1][0][RTW89_FCC][2][47] = 127, [0][0][1][0][RTW89_ETSI][1][47] = 127, @@ -38402,6 +38902,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][47] = 127, [0][0][1][0][RTW89_MKK][0][47] = 127, [0][0][1][0][RTW89_IC][1][47] = 22, + [0][0][1][0][RTW89_IC][2][47] = 70, [0][0][1][0][RTW89_KCC][1][47] = 24, [0][0][1][0][RTW89_KCC][0][47] = 127, [0][0][1][0][RTW89_ACMA][1][47] = 127, @@ -38411,6 +38912,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][47] = 127, [0][0][1][0][RTW89_UK][1][47] = 127, [0][0][1][0][RTW89_UK][0][47] = 127, + [0][0][1][0][RTW89_THAILAND][1][47] = 127, + [0][0][1][0][RTW89_THAILAND][0][47] = 127, [0][0][1][0][RTW89_FCC][1][49] = 24, [0][0][1][0][RTW89_FCC][2][49] = 127, [0][0][1][0][RTW89_ETSI][1][49] = 127, @@ -38418,6 +38921,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][49] = 127, [0][0][1][0][RTW89_MKK][0][49] = 127, [0][0][1][0][RTW89_IC][1][49] = 24, + [0][0][1][0][RTW89_IC][2][49] = 70, [0][0][1][0][RTW89_KCC][1][49] = 24, [0][0][1][0][RTW89_KCC][0][49] = 127, [0][0][1][0][RTW89_ACMA][1][49] = 127, @@ -38427,6 +38931,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][49] = 127, [0][0][1][0][RTW89_UK][1][49] = 127, [0][0][1][0][RTW89_UK][0][49] = 127, + [0][0][1][0][RTW89_THAILAND][1][49] = 127, + [0][0][1][0][RTW89_THAILAND][0][49] = 127, [0][0][1][0][RTW89_FCC][1][51] = 22, [0][0][1][0][RTW89_FCC][2][51] = 127, [0][0][1][0][RTW89_ETSI][1][51] = 127, @@ -38434,6 +38940,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][51] = 127, [0][0][1][0][RTW89_MKK][0][51] = 127, [0][0][1][0][RTW89_IC][1][51] = 22, + [0][0][1][0][RTW89_IC][2][51] = 70, [0][0][1][0][RTW89_KCC][1][51] = 24, [0][0][1][0][RTW89_KCC][0][51] = 127, [0][0][1][0][RTW89_ACMA][1][51] = 127, @@ -38443,6 +38950,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][51] = 127, [0][0][1][0][RTW89_UK][1][51] = 127, [0][0][1][0][RTW89_UK][0][51] = 127, + [0][0][1][0][RTW89_THAILAND][1][51] = 127, + [0][0][1][0][RTW89_THAILAND][0][51] = 
127, [0][0][1][0][RTW89_FCC][1][53] = 22, [0][0][1][0][RTW89_FCC][2][53] = 127, [0][0][1][0][RTW89_ETSI][1][53] = 127, @@ -38450,6 +38959,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][53] = 127, [0][0][1][0][RTW89_MKK][0][53] = 127, [0][0][1][0][RTW89_IC][1][53] = 22, + [0][0][1][0][RTW89_IC][2][53] = 70, [0][0][1][0][RTW89_KCC][1][53] = 24, [0][0][1][0][RTW89_KCC][0][53] = 127, [0][0][1][0][RTW89_ACMA][1][53] = 127, @@ -38459,6 +38969,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][53] = 127, [0][0][1][0][RTW89_UK][1][53] = 127, [0][0][1][0][RTW89_UK][0][53] = 127, + [0][0][1][0][RTW89_THAILAND][1][53] = 127, + [0][0][1][0][RTW89_THAILAND][0][53] = 127, [0][0][1][0][RTW89_FCC][1][55] = 22, [0][0][1][0][RTW89_FCC][2][55] = 68, [0][0][1][0][RTW89_ETSI][1][55] = 127, @@ -38466,6 +38978,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][55] = 127, [0][0][1][0][RTW89_MKK][0][55] = 127, [0][0][1][0][RTW89_IC][1][55] = 22, + [0][0][1][0][RTW89_IC][2][55] = 68, [0][0][1][0][RTW89_KCC][1][55] = 26, [0][0][1][0][RTW89_KCC][0][55] = 127, [0][0][1][0][RTW89_ACMA][1][55] = 127, @@ -38475,6 +38988,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][55] = 127, [0][0][1][0][RTW89_UK][1][55] = 127, [0][0][1][0][RTW89_UK][0][55] = 127, + [0][0][1][0][RTW89_THAILAND][1][55] = 127, + [0][0][1][0][RTW89_THAILAND][0][55] = 127, [0][0][1][0][RTW89_FCC][1][57] = 22, [0][0][1][0][RTW89_FCC][2][57] = 68, [0][0][1][0][RTW89_ETSI][1][57] = 127, @@ -38482,6 +38997,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][57] = 127, [0][0][1][0][RTW89_MKK][0][57] = 127, [0][0][1][0][RTW89_IC][1][57] = 22, + [0][0][1][0][RTW89_IC][2][57] = 68, [0][0][1][0][RTW89_KCC][1][57] = 26, [0][0][1][0][RTW89_KCC][0][57] = 127, [0][0][1][0][RTW89_ACMA][1][57] = 127, @@ -38491,6 +39007,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][57] = 127, [0][0][1][0][RTW89_UK][1][57] = 127, [0][0][1][0][RTW89_UK][0][57] = 127, + [0][0][1][0][RTW89_THAILAND][1][57] = 127, + [0][0][1][0][RTW89_THAILAND][0][57] = 127, [0][0][1][0][RTW89_FCC][1][59] = 22, [0][0][1][0][RTW89_FCC][2][59] = 68, [0][0][1][0][RTW89_ETSI][1][59] = 127, @@ -38498,6 +39016,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][59] = 127, [0][0][1][0][RTW89_MKK][0][59] = 127, [0][0][1][0][RTW89_IC][1][59] = 22, + [0][0][1][0][RTW89_IC][2][59] = 68, [0][0][1][0][RTW89_KCC][1][59] = 26, [0][0][1][0][RTW89_KCC][0][59] = 127, [0][0][1][0][RTW89_ACMA][1][59] = 127, @@ -38507,6 +39026,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][59] = 127, [0][0][1][0][RTW89_UK][1][59] = 127, [0][0][1][0][RTW89_UK][0][59] = 127, + [0][0][1][0][RTW89_THAILAND][1][59] = 127, + [0][0][1][0][RTW89_THAILAND][0][59] = 127, [0][0][1][0][RTW89_FCC][1][60] = 22, [0][0][1][0][RTW89_FCC][2][60] = 68, [0][0][1][0][RTW89_ETSI][1][60] = 127, @@ -38514,6 +39035,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][60] = 127, [0][0][1][0][RTW89_MKK][0][60] = 127, [0][0][1][0][RTW89_IC][1][60] = 22, + [0][0][1][0][RTW89_IC][2][60] = 68, [0][0][1][0][RTW89_KCC][1][60] = 26, [0][0][1][0][RTW89_KCC][0][60] = 127, [0][0][1][0][RTW89_ACMA][1][60] = 127, @@ -38523,6 +39045,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][60] = 127, [0][0][1][0][RTW89_UK][1][60] = 127, [0][0][1][0][RTW89_UK][0][60] = 127, + [0][0][1][0][RTW89_THAILAND][1][60] = 127, + [0][0][1][0][RTW89_THAILAND][0][60] = 127, [0][0][1][0][RTW89_FCC][1][62] = 22, [0][0][1][0][RTW89_FCC][2][62] = 68, [0][0][1][0][RTW89_ETSI][1][62] = 127, @@ -38530,6 +39054,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][62] = 127, [0][0][1][0][RTW89_MKK][0][62] = 127, [0][0][1][0][RTW89_IC][1][62] = 22, + [0][0][1][0][RTW89_IC][2][62] = 68, [0][0][1][0][RTW89_KCC][1][62] = 26, [0][0][1][0][RTW89_KCC][0][62] = 127, [0][0][1][0][RTW89_ACMA][1][62] = 127, @@ -38539,6 +39064,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][62] = 127, [0][0][1][0][RTW89_UK][1][62] = 127, [0][0][1][0][RTW89_UK][0][62] = 127, + [0][0][1][0][RTW89_THAILAND][1][62] = 127, + [0][0][1][0][RTW89_THAILAND][0][62] = 127, [0][0][1][0][RTW89_FCC][1][64] = 22, [0][0][1][0][RTW89_FCC][2][64] = 68, [0][0][1][0][RTW89_ETSI][1][64] = 127, @@ -38546,6 +39073,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][64] = 127, [0][0][1][0][RTW89_MKK][0][64] = 127, [0][0][1][0][RTW89_IC][1][64] = 22, + [0][0][1][0][RTW89_IC][2][64] = 68, [0][0][1][0][RTW89_KCC][1][64] = 26, [0][0][1][0][RTW89_KCC][0][64] = 127, [0][0][1][0][RTW89_ACMA][1][64] = 127, @@ -38555,6 +39083,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][64] = 127, [0][0][1][0][RTW89_UK][1][64] = 127, [0][0][1][0][RTW89_UK][0][64] = 127, + [0][0][1][0][RTW89_THAILAND][1][64] = 127, + [0][0][1][0][RTW89_THAILAND][0][64] = 127, [0][0][1][0][RTW89_FCC][1][66] = 22, [0][0][1][0][RTW89_FCC][2][66] = 68, [0][0][1][0][RTW89_ETSI][1][66] = 127, @@ -38562,6 +39092,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][66] = 127, [0][0][1][0][RTW89_MKK][0][66] = 127, [0][0][1][0][RTW89_IC][1][66] = 22, + [0][0][1][0][RTW89_IC][2][66] = 68, [0][0][1][0][RTW89_KCC][1][66] = 26, [0][0][1][0][RTW89_KCC][0][66] = 127, [0][0][1][0][RTW89_ACMA][1][66] = 127, @@ -38571,6 +39102,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][66] = 127, [0][0][1][0][RTW89_UK][1][66] = 127, [0][0][1][0][RTW89_UK][0][66] = 127, + [0][0][1][0][RTW89_THAILAND][1][66] = 127, + [0][0][1][0][RTW89_THAILAND][0][66] = 127, [0][0][1][0][RTW89_FCC][1][68] = 22, [0][0][1][0][RTW89_FCC][2][68] = 68, [0][0][1][0][RTW89_ETSI][1][68] = 127, @@ -38578,6 +39111,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][68] = 127, [0][0][1][0][RTW89_MKK][0][68] = 127, [0][0][1][0][RTW89_IC][1][68] = 22, + [0][0][1][0][RTW89_IC][2][68] = 68, [0][0][1][0][RTW89_KCC][1][68] = 26, [0][0][1][0][RTW89_KCC][0][68] = 127, [0][0][1][0][RTW89_ACMA][1][68] = 127, @@ -38587,6 +39121,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][68] = 127, [0][0][1][0][RTW89_UK][1][68] = 127, [0][0][1][0][RTW89_UK][0][68] = 127, + [0][0][1][0][RTW89_THAILAND][1][68] = 127, + [0][0][1][0][RTW89_THAILAND][0][68] = 127, [0][0][1][0][RTW89_FCC][1][70] = 24, [0][0][1][0][RTW89_FCC][2][70] = 68, [0][0][1][0][RTW89_ETSI][1][70] = 127, @@ -38594,6 +39130,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][70] = 127, 
[0][0][1][0][RTW89_MKK][0][70] = 127, [0][0][1][0][RTW89_IC][1][70] = 24, + [0][0][1][0][RTW89_IC][2][70] = 68, [0][0][1][0][RTW89_KCC][1][70] = 26, [0][0][1][0][RTW89_KCC][0][70] = 127, [0][0][1][0][RTW89_ACMA][1][70] = 127, @@ -38603,6 +39140,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][70] = 127, [0][0][1][0][RTW89_UK][1][70] = 127, [0][0][1][0][RTW89_UK][0][70] = 127, + [0][0][1][0][RTW89_THAILAND][1][70] = 127, + [0][0][1][0][RTW89_THAILAND][0][70] = 127, [0][0][1][0][RTW89_FCC][1][72] = 22, [0][0][1][0][RTW89_FCC][2][72] = 68, [0][0][1][0][RTW89_ETSI][1][72] = 127, @@ -38610,6 +39149,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][72] = 127, [0][0][1][0][RTW89_MKK][0][72] = 127, [0][0][1][0][RTW89_IC][1][72] = 22, + [0][0][1][0][RTW89_IC][2][72] = 68, [0][0][1][0][RTW89_KCC][1][72] = 26, [0][0][1][0][RTW89_KCC][0][72] = 127, [0][0][1][0][RTW89_ACMA][1][72] = 127, @@ -38619,6 +39159,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][72] = 127, [0][0][1][0][RTW89_UK][1][72] = 127, [0][0][1][0][RTW89_UK][0][72] = 127, + [0][0][1][0][RTW89_THAILAND][1][72] = 127, + [0][0][1][0][RTW89_THAILAND][0][72] = 127, [0][0][1][0][RTW89_FCC][1][74] = 22, [0][0][1][0][RTW89_FCC][2][74] = 68, [0][0][1][0][RTW89_ETSI][1][74] = 127, @@ -38626,6 +39168,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][74] = 127, [0][0][1][0][RTW89_MKK][0][74] = 127, [0][0][1][0][RTW89_IC][1][74] = 22, + [0][0][1][0][RTW89_IC][2][74] = 68, [0][0][1][0][RTW89_KCC][1][74] = 26, [0][0][1][0][RTW89_KCC][0][74] = 127, [0][0][1][0][RTW89_ACMA][1][74] = 127, @@ -38635,6 +39178,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][74] = 127, [0][0][1][0][RTW89_UK][1][74] = 127, [0][0][1][0][RTW89_UK][0][74] = 127, + [0][0][1][0][RTW89_THAILAND][1][74] = 127, + [0][0][1][0][RTW89_THAILAND][0][74] = 127, [0][0][1][0][RTW89_FCC][1][75] = 22, [0][0][1][0][RTW89_FCC][2][75] = 68, [0][0][1][0][RTW89_ETSI][1][75] = 127, @@ -38642,6 +39187,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][75] = 127, [0][0][1][0][RTW89_MKK][0][75] = 127, [0][0][1][0][RTW89_IC][1][75] = 22, + [0][0][1][0][RTW89_IC][2][75] = 68, [0][0][1][0][RTW89_KCC][1][75] = 26, [0][0][1][0][RTW89_KCC][0][75] = 127, [0][0][1][0][RTW89_ACMA][1][75] = 127, @@ -38651,6 +39197,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][75] = 127, [0][0][1][0][RTW89_UK][1][75] = 127, [0][0][1][0][RTW89_UK][0][75] = 127, + [0][0][1][0][RTW89_THAILAND][1][75] = 127, + [0][0][1][0][RTW89_THAILAND][0][75] = 127, [0][0][1][0][RTW89_FCC][1][77] = 22, [0][0][1][0][RTW89_FCC][2][77] = 68, [0][0][1][0][RTW89_ETSI][1][77] = 127, @@ -38658,6 +39206,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][77] = 127, [0][0][1][0][RTW89_MKK][0][77] = 127, [0][0][1][0][RTW89_IC][1][77] = 22, + [0][0][1][0][RTW89_IC][2][77] = 68, [0][0][1][0][RTW89_KCC][1][77] = 26, [0][0][1][0][RTW89_KCC][0][77] = 127, [0][0][1][0][RTW89_ACMA][1][77] = 127, @@ -38667,6 +39216,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][77] = 127, [0][0][1][0][RTW89_UK][1][77] = 127, [0][0][1][0][RTW89_UK][0][77] = 127, + [0][0][1][0][RTW89_THAILAND][1][77] = 127, + 
[0][0][1][0][RTW89_THAILAND][0][77] = 127, [0][0][1][0][RTW89_FCC][1][79] = 22, [0][0][1][0][RTW89_FCC][2][79] = 68, [0][0][1][0][RTW89_ETSI][1][79] = 127, @@ -38674,6 +39225,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][79] = 127, [0][0][1][0][RTW89_MKK][0][79] = 127, [0][0][1][0][RTW89_IC][1][79] = 22, + [0][0][1][0][RTW89_IC][2][79] = 68, [0][0][1][0][RTW89_KCC][1][79] = 26, [0][0][1][0][RTW89_KCC][0][79] = 127, [0][0][1][0][RTW89_ACMA][1][79] = 127, @@ -38683,6 +39235,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][79] = 127, [0][0][1][0][RTW89_UK][1][79] = 127, [0][0][1][0][RTW89_UK][0][79] = 127, + [0][0][1][0][RTW89_THAILAND][1][79] = 127, + [0][0][1][0][RTW89_THAILAND][0][79] = 127, [0][0][1][0][RTW89_FCC][1][81] = 22, [0][0][1][0][RTW89_FCC][2][81] = 68, [0][0][1][0][RTW89_ETSI][1][81] = 127, @@ -38690,6 +39244,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][81] = 127, [0][0][1][0][RTW89_MKK][0][81] = 127, [0][0][1][0][RTW89_IC][1][81] = 22, + [0][0][1][0][RTW89_IC][2][81] = 68, [0][0][1][0][RTW89_KCC][1][81] = 26, [0][0][1][0][RTW89_KCC][0][81] = 127, [0][0][1][0][RTW89_ACMA][1][81] = 127, @@ -38699,6 +39254,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][81] = 127, [0][0][1][0][RTW89_UK][1][81] = 127, [0][0][1][0][RTW89_UK][0][81] = 127, + [0][0][1][0][RTW89_THAILAND][1][81] = 127, + [0][0][1][0][RTW89_THAILAND][0][81] = 127, [0][0][1][0][RTW89_FCC][1][83] = 22, [0][0][1][0][RTW89_FCC][2][83] = 68, [0][0][1][0][RTW89_ETSI][1][83] = 127, @@ -38706,6 +39263,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][83] = 127, [0][0][1][0][RTW89_MKK][0][83] = 127, [0][0][1][0][RTW89_IC][1][83] = 22, + [0][0][1][0][RTW89_IC][2][83] = 68, [0][0][1][0][RTW89_KCC][1][83] = 32, [0][0][1][0][RTW89_KCC][0][83] = 127, [0][0][1][0][RTW89_ACMA][1][83] = 127, @@ -38715,6 +39273,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][83] = 127, [0][0][1][0][RTW89_UK][1][83] = 127, [0][0][1][0][RTW89_UK][0][83] = 127, + [0][0][1][0][RTW89_THAILAND][1][83] = 127, + [0][0][1][0][RTW89_THAILAND][0][83] = 127, [0][0][1][0][RTW89_FCC][1][85] = 22, [0][0][1][0][RTW89_FCC][2][85] = 68, [0][0][1][0][RTW89_ETSI][1][85] = 127, @@ -38722,6 +39282,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][85] = 127, [0][0][1][0][RTW89_MKK][0][85] = 127, [0][0][1][0][RTW89_IC][1][85] = 22, + [0][0][1][0][RTW89_IC][2][85] = 68, [0][0][1][0][RTW89_KCC][1][85] = 32, [0][0][1][0][RTW89_KCC][0][85] = 127, [0][0][1][0][RTW89_ACMA][1][85] = 127, @@ -38731,6 +39292,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][85] = 127, [0][0][1][0][RTW89_UK][1][85] = 127, [0][0][1][0][RTW89_UK][0][85] = 127, + [0][0][1][0][RTW89_THAILAND][1][85] = 127, + [0][0][1][0][RTW89_THAILAND][0][85] = 127, [0][0][1][0][RTW89_FCC][1][87] = 22, [0][0][1][0][RTW89_FCC][2][87] = 127, [0][0][1][0][RTW89_ETSI][1][87] = 127, @@ -38738,6 +39301,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][87] = 127, [0][0][1][0][RTW89_MKK][0][87] = 127, [0][0][1][0][RTW89_IC][1][87] = 22, + [0][0][1][0][RTW89_IC][2][87] = 127, [0][0][1][0][RTW89_KCC][1][87] = 32, [0][0][1][0][RTW89_KCC][0][87] = 127, [0][0][1][0][RTW89_ACMA][1][87] = 
127, @@ -38747,6 +39311,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][87] = 127, [0][0][1][0][RTW89_UK][1][87] = 127, [0][0][1][0][RTW89_UK][0][87] = 127, + [0][0][1][0][RTW89_THAILAND][1][87] = 127, + [0][0][1][0][RTW89_THAILAND][0][87] = 127, [0][0][1][0][RTW89_FCC][1][89] = 22, [0][0][1][0][RTW89_FCC][2][89] = 127, [0][0][1][0][RTW89_ETSI][1][89] = 127, @@ -38754,6 +39320,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][89] = 127, [0][0][1][0][RTW89_MKK][0][89] = 127, [0][0][1][0][RTW89_IC][1][89] = 22, + [0][0][1][0][RTW89_IC][2][89] = 127, [0][0][1][0][RTW89_KCC][1][89] = 32, [0][0][1][0][RTW89_KCC][0][89] = 127, [0][0][1][0][RTW89_ACMA][1][89] = 127, @@ -38763,6 +39330,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][89] = 127, [0][0][1][0][RTW89_UK][1][89] = 127, [0][0][1][0][RTW89_UK][0][89] = 127, + [0][0][1][0][RTW89_THAILAND][1][89] = 127, + [0][0][1][0][RTW89_THAILAND][0][89] = 127, [0][0][1][0][RTW89_FCC][1][90] = 22, [0][0][1][0][RTW89_FCC][2][90] = 127, [0][0][1][0][RTW89_ETSI][1][90] = 127, @@ -38770,6 +39339,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][90] = 127, [0][0][1][0][RTW89_MKK][0][90] = 127, [0][0][1][0][RTW89_IC][1][90] = 22, + [0][0][1][0][RTW89_IC][2][90] = 127, [0][0][1][0][RTW89_KCC][1][90] = 32, [0][0][1][0][RTW89_KCC][0][90] = 127, [0][0][1][0][RTW89_ACMA][1][90] = 127, @@ -38779,6 +39349,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][90] = 127, [0][0][1][0][RTW89_UK][1][90] = 127, [0][0][1][0][RTW89_UK][0][90] = 127, + [0][0][1][0][RTW89_THAILAND][1][90] = 127, + [0][0][1][0][RTW89_THAILAND][0][90] = 127, [0][0][1][0][RTW89_FCC][1][92] = 22, [0][0][1][0][RTW89_FCC][2][92] = 127, [0][0][1][0][RTW89_ETSI][1][92] = 127, @@ -38786,6 +39358,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][92] = 127, [0][0][1][0][RTW89_MKK][0][92] = 127, [0][0][1][0][RTW89_IC][1][92] = 22, + [0][0][1][0][RTW89_IC][2][92] = 127, [0][0][1][0][RTW89_KCC][1][92] = 32, [0][0][1][0][RTW89_KCC][0][92] = 127, [0][0][1][0][RTW89_ACMA][1][92] = 127, @@ -38795,6 +39368,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][92] = 127, [0][0][1][0][RTW89_UK][1][92] = 127, [0][0][1][0][RTW89_UK][0][92] = 127, + [0][0][1][0][RTW89_THAILAND][1][92] = 127, + [0][0][1][0][RTW89_THAILAND][0][92] = 127, [0][0][1][0][RTW89_FCC][1][94] = 22, [0][0][1][0][RTW89_FCC][2][94] = 127, [0][0][1][0][RTW89_ETSI][1][94] = 127, @@ -38802,6 +39377,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][94] = 127, [0][0][1][0][RTW89_MKK][0][94] = 127, [0][0][1][0][RTW89_IC][1][94] = 22, + [0][0][1][0][RTW89_IC][2][94] = 127, [0][0][1][0][RTW89_KCC][1][94] = 32, [0][0][1][0][RTW89_KCC][0][94] = 127, [0][0][1][0][RTW89_ACMA][1][94] = 127, @@ -38811,6 +39387,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][94] = 127, [0][0][1][0][RTW89_UK][1][94] = 127, [0][0][1][0][RTW89_UK][0][94] = 127, + [0][0][1][0][RTW89_THAILAND][1][94] = 127, + [0][0][1][0][RTW89_THAILAND][0][94] = 127, [0][0][1][0][RTW89_FCC][1][96] = 22, [0][0][1][0][RTW89_FCC][2][96] = 127, [0][0][1][0][RTW89_ETSI][1][96] = 127, @@ -38818,6 +39396,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][0][1][0][RTW89_MKK][1][96] = 127, [0][0][1][0][RTW89_MKK][0][96] = 127, [0][0][1][0][RTW89_IC][1][96] = 22, + [0][0][1][0][RTW89_IC][2][96] = 127, [0][0][1][0][RTW89_KCC][1][96] = 32, [0][0][1][0][RTW89_KCC][0][96] = 127, [0][0][1][0][RTW89_ACMA][1][96] = 127, @@ -38827,6 +39406,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][96] = 127, [0][0][1][0][RTW89_UK][1][96] = 127, [0][0][1][0][RTW89_UK][0][96] = 127, + [0][0][1][0][RTW89_THAILAND][1][96] = 127, + [0][0][1][0][RTW89_THAILAND][0][96] = 127, [0][0][1][0][RTW89_FCC][1][98] = 22, [0][0][1][0][RTW89_FCC][2][98] = 127, [0][0][1][0][RTW89_ETSI][1][98] = 127, @@ -38834,6 +39415,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][98] = 127, [0][0][1][0][RTW89_MKK][0][98] = 127, [0][0][1][0][RTW89_IC][1][98] = 22, + [0][0][1][0][RTW89_IC][2][98] = 127, [0][0][1][0][RTW89_KCC][1][98] = 32, [0][0][1][0][RTW89_KCC][0][98] = 127, [0][0][1][0][RTW89_ACMA][1][98] = 127, @@ -38843,6 +39425,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][98] = 127, [0][0][1][0][RTW89_UK][1][98] = 127, [0][0][1][0][RTW89_UK][0][98] = 127, + [0][0][1][0][RTW89_THAILAND][1][98] = 127, + [0][0][1][0][RTW89_THAILAND][0][98] = 127, [0][0][1][0][RTW89_FCC][1][100] = 22, [0][0][1][0][RTW89_FCC][2][100] = 127, [0][0][1][0][RTW89_ETSI][1][100] = 127, @@ -38850,6 +39434,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][100] = 127, [0][0][1][0][RTW89_MKK][0][100] = 127, [0][0][1][0][RTW89_IC][1][100] = 22, + [0][0][1][0][RTW89_IC][2][100] = 127, [0][0][1][0][RTW89_KCC][1][100] = 32, [0][0][1][0][RTW89_KCC][0][100] = 127, [0][0][1][0][RTW89_ACMA][1][100] = 127, @@ -38859,6 +39444,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][100] = 127, [0][0][1][0][RTW89_UK][1][100] = 127, [0][0][1][0][RTW89_UK][0][100] = 127, + [0][0][1][0][RTW89_THAILAND][1][100] = 127, + [0][0][1][0][RTW89_THAILAND][0][100] = 127, [0][0][1][0][RTW89_FCC][1][102] = 22, [0][0][1][0][RTW89_FCC][2][102] = 127, [0][0][1][0][RTW89_ETSI][1][102] = 127, @@ -38866,6 +39453,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][102] = 127, [0][0][1][0][RTW89_MKK][0][102] = 127, [0][0][1][0][RTW89_IC][1][102] = 22, + [0][0][1][0][RTW89_IC][2][102] = 127, [0][0][1][0][RTW89_KCC][1][102] = 32, [0][0][1][0][RTW89_KCC][0][102] = 127, [0][0][1][0][RTW89_ACMA][1][102] = 127, @@ -38875,6 +39463,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][102] = 127, [0][0][1][0][RTW89_UK][1][102] = 127, [0][0][1][0][RTW89_UK][0][102] = 127, + [0][0][1][0][RTW89_THAILAND][1][102] = 127, + [0][0][1][0][RTW89_THAILAND][0][102] = 127, [0][0][1][0][RTW89_FCC][1][104] = 22, [0][0][1][0][RTW89_FCC][2][104] = 127, [0][0][1][0][RTW89_ETSI][1][104] = 127, @@ -38882,6 +39472,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][104] = 127, [0][0][1][0][RTW89_MKK][0][104] = 127, [0][0][1][0][RTW89_IC][1][104] = 22, + [0][0][1][0][RTW89_IC][2][104] = 127, [0][0][1][0][RTW89_KCC][1][104] = 32, [0][0][1][0][RTW89_KCC][0][104] = 127, [0][0][1][0][RTW89_ACMA][1][104] = 127, @@ -38891,6 +39482,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][104] = 127, [0][0][1][0][RTW89_UK][1][104] = 127, 
[0][0][1][0][RTW89_UK][0][104] = 127, + [0][0][1][0][RTW89_THAILAND][1][104] = 127, + [0][0][1][0][RTW89_THAILAND][0][104] = 127, [0][0][1][0][RTW89_FCC][1][105] = 22, [0][0][1][0][RTW89_FCC][2][105] = 127, [0][0][1][0][RTW89_ETSI][1][105] = 127, @@ -38898,6 +39491,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][105] = 127, [0][0][1][0][RTW89_MKK][0][105] = 127, [0][0][1][0][RTW89_IC][1][105] = 22, + [0][0][1][0][RTW89_IC][2][105] = 127, [0][0][1][0][RTW89_KCC][1][105] = 32, [0][0][1][0][RTW89_KCC][0][105] = 127, [0][0][1][0][RTW89_ACMA][1][105] = 127, @@ -38907,6 +39501,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][105] = 127, [0][0][1][0][RTW89_UK][1][105] = 127, [0][0][1][0][RTW89_UK][0][105] = 127, + [0][0][1][0][RTW89_THAILAND][1][105] = 127, + [0][0][1][0][RTW89_THAILAND][0][105] = 127, [0][0][1][0][RTW89_FCC][1][107] = 24, [0][0][1][0][RTW89_FCC][2][107] = 127, [0][0][1][0][RTW89_ETSI][1][107] = 127, @@ -38914,6 +39510,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][107] = 127, [0][0][1][0][RTW89_MKK][0][107] = 127, [0][0][1][0][RTW89_IC][1][107] = 24, + [0][0][1][0][RTW89_IC][2][107] = 127, [0][0][1][0][RTW89_KCC][1][107] = 32, [0][0][1][0][RTW89_KCC][0][107] = 127, [0][0][1][0][RTW89_ACMA][1][107] = 127, @@ -38923,6 +39520,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][107] = 127, [0][0][1][0][RTW89_UK][1][107] = 127, [0][0][1][0][RTW89_UK][0][107] = 127, + [0][0][1][0][RTW89_THAILAND][1][107] = 127, + [0][0][1][0][RTW89_THAILAND][0][107] = 127, [0][0][1][0][RTW89_FCC][1][109] = 24, [0][0][1][0][RTW89_FCC][2][109] = 127, [0][0][1][0][RTW89_ETSI][1][109] = 127, @@ -38930,6 +39529,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][109] = 127, [0][0][1][0][RTW89_MKK][0][109] = 127, [0][0][1][0][RTW89_IC][1][109] = 24, + [0][0][1][0][RTW89_IC][2][109] = 127, [0][0][1][0][RTW89_KCC][1][109] = 32, [0][0][1][0][RTW89_KCC][0][109] = 127, [0][0][1][0][RTW89_ACMA][1][109] = 127, @@ -38939,6 +39539,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][109] = 127, [0][0][1][0][RTW89_UK][1][109] = 127, [0][0][1][0][RTW89_UK][0][109] = 127, + [0][0][1][0][RTW89_THAILAND][1][109] = 127, + [0][0][1][0][RTW89_THAILAND][0][109] = 127, [0][0][1][0][RTW89_FCC][1][111] = 127, [0][0][1][0][RTW89_FCC][2][111] = 127, [0][0][1][0][RTW89_ETSI][1][111] = 127, @@ -38946,6 +39548,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][111] = 127, [0][0][1][0][RTW89_MKK][0][111] = 127, [0][0][1][0][RTW89_IC][1][111] = 127, + [0][0][1][0][RTW89_IC][2][111] = 127, [0][0][1][0][RTW89_KCC][1][111] = 127, [0][0][1][0][RTW89_KCC][0][111] = 127, [0][0][1][0][RTW89_ACMA][1][111] = 127, @@ -38955,6 +39558,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][111] = 127, [0][0][1][0][RTW89_UK][1][111] = 127, [0][0][1][0][RTW89_UK][0][111] = 127, + [0][0][1][0][RTW89_THAILAND][1][111] = 127, + [0][0][1][0][RTW89_THAILAND][0][111] = 127, [0][0][1][0][RTW89_FCC][1][113] = 127, [0][0][1][0][RTW89_FCC][2][113] = 127, [0][0][1][0][RTW89_ETSI][1][113] = 127, @@ -38962,6 +39567,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][113] = 127, [0][0][1][0][RTW89_MKK][0][113] = 127, 
[0][0][1][0][RTW89_IC][1][113] = 127, + [0][0][1][0][RTW89_IC][2][113] = 127, [0][0][1][0][RTW89_KCC][1][113] = 127, [0][0][1][0][RTW89_KCC][0][113] = 127, [0][0][1][0][RTW89_ACMA][1][113] = 127, @@ -38971,6 +39577,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][113] = 127, [0][0][1][0][RTW89_UK][1][113] = 127, [0][0][1][0][RTW89_UK][0][113] = 127, + [0][0][1][0][RTW89_THAILAND][1][113] = 127, + [0][0][1][0][RTW89_THAILAND][0][113] = 127, [0][0][1][0][RTW89_FCC][1][115] = 127, [0][0][1][0][RTW89_FCC][2][115] = 127, [0][0][1][0][RTW89_ETSI][1][115] = 127, @@ -38978,6 +39586,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][115] = 127, [0][0][1][0][RTW89_MKK][0][115] = 127, [0][0][1][0][RTW89_IC][1][115] = 127, + [0][0][1][0][RTW89_IC][2][115] = 127, [0][0][1][0][RTW89_KCC][1][115] = 127, [0][0][1][0][RTW89_KCC][0][115] = 127, [0][0][1][0][RTW89_ACMA][1][115] = 127, @@ -38987,6 +39596,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][115] = 127, [0][0][1][0][RTW89_UK][1][115] = 127, [0][0][1][0][RTW89_UK][0][115] = 127, + [0][0][1][0][RTW89_THAILAND][1][115] = 127, + [0][0][1][0][RTW89_THAILAND][0][115] = 127, [0][0][1][0][RTW89_FCC][1][117] = 127, [0][0][1][0][RTW89_FCC][2][117] = 127, [0][0][1][0][RTW89_ETSI][1][117] = 127, @@ -38994,6 +39605,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][117] = 127, [0][0][1][0][RTW89_MKK][0][117] = 127, [0][0][1][0][RTW89_IC][1][117] = 127, + [0][0][1][0][RTW89_IC][2][117] = 127, [0][0][1][0][RTW89_KCC][1][117] = 127, [0][0][1][0][RTW89_KCC][0][117] = 127, [0][0][1][0][RTW89_ACMA][1][117] = 127, @@ -39003,6 +39615,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][117] = 127, [0][0][1][0][RTW89_UK][1][117] = 127, [0][0][1][0][RTW89_UK][0][117] = 127, + [0][0][1][0][RTW89_THAILAND][1][117] = 127, + [0][0][1][0][RTW89_THAILAND][0][117] = 127, [0][0][1][0][RTW89_FCC][1][119] = 127, [0][0][1][0][RTW89_FCC][2][119] = 127, [0][0][1][0][RTW89_ETSI][1][119] = 127, @@ -39010,6 +39624,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][1][119] = 127, [0][0][1][0][RTW89_MKK][0][119] = 127, [0][0][1][0][RTW89_IC][1][119] = 127, + [0][0][1][0][RTW89_IC][2][119] = 127, [0][0][1][0][RTW89_KCC][1][119] = 127, [0][0][1][0][RTW89_KCC][0][119] = 127, [0][0][1][0][RTW89_ACMA][1][119] = 127, @@ -39019,6 +39634,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_QATAR][0][119] = 127, [0][0][1][0][RTW89_UK][1][119] = 127, [0][0][1][0][RTW89_UK][0][119] = 127, + [0][0][1][0][RTW89_THAILAND][1][119] = 127, + [0][0][1][0][RTW89_THAILAND][0][119] = 127, [0][1][1][0][RTW89_FCC][1][0] = -2, [0][1][1][0][RTW89_FCC][2][0] = 54, [0][1][1][0][RTW89_ETSI][1][0] = 54, @@ -39026,6 +39643,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][0] = 56, [0][1][1][0][RTW89_MKK][0][0] = 16, [0][1][1][0][RTW89_IC][1][0] = -2, + [0][1][1][0][RTW89_IC][2][0] = 54, [0][1][1][0][RTW89_KCC][1][0] = 12, [0][1][1][0][RTW89_KCC][0][0] = 10, [0][1][1][0][RTW89_ACMA][1][0] = 54, @@ -39035,6 +39653,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][0] = 18, [0][1][1][0][RTW89_UK][1][0] = 54, [0][1][1][0][RTW89_UK][0][0] = 18, + [0][1][1][0][RTW89_THAILAND][1][0] = 44, + 
[0][1][1][0][RTW89_THAILAND][0][0] = -2, [0][1][1][0][RTW89_FCC][1][2] = -4, [0][1][1][0][RTW89_FCC][2][2] = 54, [0][1][1][0][RTW89_ETSI][1][2] = 54, @@ -39042,6 +39662,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][2] = 54, [0][1][1][0][RTW89_MKK][0][2] = 16, [0][1][1][0][RTW89_IC][1][2] = -4, + [0][1][1][0][RTW89_IC][2][2] = 54, [0][1][1][0][RTW89_KCC][1][2] = 12, [0][1][1][0][RTW89_KCC][0][2] = 12, [0][1][1][0][RTW89_ACMA][1][2] = 54, @@ -39051,6 +39672,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][2] = 18, [0][1][1][0][RTW89_UK][1][2] = 54, [0][1][1][0][RTW89_UK][0][2] = 18, + [0][1][1][0][RTW89_THAILAND][1][2] = 44, + [0][1][1][0][RTW89_THAILAND][0][2] = -4, [0][1][1][0][RTW89_FCC][1][4] = -4, [0][1][1][0][RTW89_FCC][2][4] = 54, [0][1][1][0][RTW89_ETSI][1][4] = 54, @@ -39058,6 +39681,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][4] = 54, [0][1][1][0][RTW89_MKK][0][4] = 16, [0][1][1][0][RTW89_IC][1][4] = -4, + [0][1][1][0][RTW89_IC][2][4] = 54, [0][1][1][0][RTW89_KCC][1][4] = 12, [0][1][1][0][RTW89_KCC][0][4] = 12, [0][1][1][0][RTW89_ACMA][1][4] = 54, @@ -39067,6 +39691,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][4] = 18, [0][1][1][0][RTW89_UK][1][4] = 54, [0][1][1][0][RTW89_UK][0][4] = 18, + [0][1][1][0][RTW89_THAILAND][1][4] = 44, + [0][1][1][0][RTW89_THAILAND][0][4] = -4, [0][1][1][0][RTW89_FCC][1][6] = -4, [0][1][1][0][RTW89_FCC][2][6] = 54, [0][1][1][0][RTW89_ETSI][1][6] = 54, @@ -39074,6 +39700,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][6] = 54, [0][1][1][0][RTW89_MKK][0][6] = 16, [0][1][1][0][RTW89_IC][1][6] = -4, + [0][1][1][0][RTW89_IC][2][6] = 54, [0][1][1][0][RTW89_KCC][1][6] = 12, [0][1][1][0][RTW89_KCC][0][6] = 12, [0][1][1][0][RTW89_ACMA][1][6] = 54, @@ -39083,6 +39710,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][6] = 18, [0][1][1][0][RTW89_UK][1][6] = 54, [0][1][1][0][RTW89_UK][0][6] = 18, + [0][1][1][0][RTW89_THAILAND][1][6] = 44, + [0][1][1][0][RTW89_THAILAND][0][6] = -4, [0][1][1][0][RTW89_FCC][1][8] = -4, [0][1][1][0][RTW89_FCC][2][8] = 54, [0][1][1][0][RTW89_ETSI][1][8] = 54, @@ -39090,6 +39719,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][8] = 54, [0][1][1][0][RTW89_MKK][0][8] = 16, [0][1][1][0][RTW89_IC][1][8] = -4, + [0][1][1][0][RTW89_IC][2][8] = 54, [0][1][1][0][RTW89_KCC][1][8] = 12, [0][1][1][0][RTW89_KCC][0][8] = 12, [0][1][1][0][RTW89_ACMA][1][8] = 54, @@ -39099,6 +39729,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][8] = 18, [0][1][1][0][RTW89_UK][1][8] = 54, [0][1][1][0][RTW89_UK][0][8] = 18, + [0][1][1][0][RTW89_THAILAND][1][8] = 44, + [0][1][1][0][RTW89_THAILAND][0][8] = -4, [0][1][1][0][RTW89_FCC][1][10] = -4, [0][1][1][0][RTW89_FCC][2][10] = 54, [0][1][1][0][RTW89_ETSI][1][10] = 54, @@ -39106,6 +39738,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][10] = 54, [0][1][1][0][RTW89_MKK][0][10] = 16, [0][1][1][0][RTW89_IC][1][10] = -4, + [0][1][1][0][RTW89_IC][2][10] = 54, [0][1][1][0][RTW89_KCC][1][10] = 12, [0][1][1][0][RTW89_KCC][0][10] = 12, [0][1][1][0][RTW89_ACMA][1][10] = 54, @@ -39115,6 +39748,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_QATAR][0][10] = 18, [0][1][1][0][RTW89_UK][1][10] = 54, [0][1][1][0][RTW89_UK][0][10] = 18, + [0][1][1][0][RTW89_THAILAND][1][10] = 44, + [0][1][1][0][RTW89_THAILAND][0][10] = -4, [0][1][1][0][RTW89_FCC][1][12] = -4, [0][1][1][0][RTW89_FCC][2][12] = 54, [0][1][1][0][RTW89_ETSI][1][12] = 54, @@ -39122,6 +39757,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][12] = 54, [0][1][1][0][RTW89_MKK][0][12] = 16, [0][1][1][0][RTW89_IC][1][12] = -4, + [0][1][1][0][RTW89_IC][2][12] = 54, [0][1][1][0][RTW89_KCC][1][12] = 12, [0][1][1][0][RTW89_KCC][0][12] = 12, [0][1][1][0][RTW89_ACMA][1][12] = 54, @@ -39131,6 +39767,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][12] = 18, [0][1][1][0][RTW89_UK][1][12] = 54, [0][1][1][0][RTW89_UK][0][12] = 18, + [0][1][1][0][RTW89_THAILAND][1][12] = 44, + [0][1][1][0][RTW89_THAILAND][0][12] = -4, [0][1][1][0][RTW89_FCC][1][14] = -4, [0][1][1][0][RTW89_FCC][2][14] = 54, [0][1][1][0][RTW89_ETSI][1][14] = 54, @@ -39138,6 +39776,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][14] = 54, [0][1][1][0][RTW89_MKK][0][14] = 16, [0][1][1][0][RTW89_IC][1][14] = -4, + [0][1][1][0][RTW89_IC][2][14] = 54, [0][1][1][0][RTW89_KCC][1][14] = 12, [0][1][1][0][RTW89_KCC][0][14] = 12, [0][1][1][0][RTW89_ACMA][1][14] = 54, @@ -39147,6 +39786,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][14] = 18, [0][1][1][0][RTW89_UK][1][14] = 54, [0][1][1][0][RTW89_UK][0][14] = 18, + [0][1][1][0][RTW89_THAILAND][1][14] = 44, + [0][1][1][0][RTW89_THAILAND][0][14] = -4, [0][1][1][0][RTW89_FCC][1][15] = -4, [0][1][1][0][RTW89_FCC][2][15] = 54, [0][1][1][0][RTW89_ETSI][1][15] = 54, @@ -39154,6 +39795,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][15] = 54, [0][1][1][0][RTW89_MKK][0][15] = 16, [0][1][1][0][RTW89_IC][1][15] = -4, + [0][1][1][0][RTW89_IC][2][15] = 54, [0][1][1][0][RTW89_KCC][1][15] = 12, [0][1][1][0][RTW89_KCC][0][15] = 12, [0][1][1][0][RTW89_ACMA][1][15] = 54, @@ -39163,6 +39805,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][15] = 18, [0][1][1][0][RTW89_UK][1][15] = 54, [0][1][1][0][RTW89_UK][0][15] = 18, + [0][1][1][0][RTW89_THAILAND][1][15] = 44, + [0][1][1][0][RTW89_THAILAND][0][15] = -4, [0][1][1][0][RTW89_FCC][1][17] = -4, [0][1][1][0][RTW89_FCC][2][17] = 54, [0][1][1][0][RTW89_ETSI][1][17] = 54, @@ -39170,6 +39814,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][17] = 54, [0][1][1][0][RTW89_MKK][0][17] = 16, [0][1][1][0][RTW89_IC][1][17] = -4, + [0][1][1][0][RTW89_IC][2][17] = 54, [0][1][1][0][RTW89_KCC][1][17] = 12, [0][1][1][0][RTW89_KCC][0][17] = 12, [0][1][1][0][RTW89_ACMA][1][17] = 54, @@ -39179,6 +39824,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][17] = 18, [0][1][1][0][RTW89_UK][1][17] = 54, [0][1][1][0][RTW89_UK][0][17] = 18, + [0][1][1][0][RTW89_THAILAND][1][17] = 44, + [0][1][1][0][RTW89_THAILAND][0][17] = -4, [0][1][1][0][RTW89_FCC][1][19] = -4, [0][1][1][0][RTW89_FCC][2][19] = 54, [0][1][1][0][RTW89_ETSI][1][19] = 54, @@ -39186,6 +39833,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][19] = 54, [0][1][1][0][RTW89_MKK][0][19] = 16, [0][1][1][0][RTW89_IC][1][19] = -4, + [0][1][1][0][RTW89_IC][2][19] = 54, 
[0][1][1][0][RTW89_KCC][1][19] = 12, [0][1][1][0][RTW89_KCC][0][19] = 12, [0][1][1][0][RTW89_ACMA][1][19] = 54, @@ -39195,6 +39843,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][19] = 18, [0][1][1][0][RTW89_UK][1][19] = 54, [0][1][1][0][RTW89_UK][0][19] = 18, + [0][1][1][0][RTW89_THAILAND][1][19] = 44, + [0][1][1][0][RTW89_THAILAND][0][19] = -4, [0][1][1][0][RTW89_FCC][1][21] = -4, [0][1][1][0][RTW89_FCC][2][21] = 54, [0][1][1][0][RTW89_ETSI][1][21] = 54, @@ -39202,6 +39852,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][21] = 54, [0][1][1][0][RTW89_MKK][0][21] = 16, [0][1][1][0][RTW89_IC][1][21] = -4, + [0][1][1][0][RTW89_IC][2][21] = 54, [0][1][1][0][RTW89_KCC][1][21] = 12, [0][1][1][0][RTW89_KCC][0][21] = 12, [0][1][1][0][RTW89_ACMA][1][21] = 54, @@ -39211,6 +39862,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][21] = 18, [0][1][1][0][RTW89_UK][1][21] = 54, [0][1][1][0][RTW89_UK][0][21] = 18, + [0][1][1][0][RTW89_THAILAND][1][21] = 44, + [0][1][1][0][RTW89_THAILAND][0][21] = -4, [0][1][1][0][RTW89_FCC][1][23] = -4, [0][1][1][0][RTW89_FCC][2][23] = 68, [0][1][1][0][RTW89_ETSI][1][23] = 54, @@ -39218,6 +39871,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][23] = 54, [0][1][1][0][RTW89_MKK][0][23] = 16, [0][1][1][0][RTW89_IC][1][23] = -4, + [0][1][1][0][RTW89_IC][2][23] = 68, [0][1][1][0][RTW89_KCC][1][23] = 12, [0][1][1][0][RTW89_KCC][0][23] = 10, [0][1][1][0][RTW89_ACMA][1][23] = 54, @@ -39227,6 +39881,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][23] = 18, [0][1][1][0][RTW89_UK][1][23] = 54, [0][1][1][0][RTW89_UK][0][23] = 18, + [0][1][1][0][RTW89_THAILAND][1][23] = 44, + [0][1][1][0][RTW89_THAILAND][0][23] = -4, [0][1][1][0][RTW89_FCC][1][25] = -4, [0][1][1][0][RTW89_FCC][2][25] = 68, [0][1][1][0][RTW89_ETSI][1][25] = 54, @@ -39234,6 +39890,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][25] = 54, [0][1][1][0][RTW89_MKK][0][25] = 16, [0][1][1][0][RTW89_IC][1][25] = -4, + [0][1][1][0][RTW89_IC][2][25] = 68, [0][1][1][0][RTW89_KCC][1][25] = 12, [0][1][1][0][RTW89_KCC][0][25] = 14, [0][1][1][0][RTW89_ACMA][1][25] = 54, @@ -39243,6 +39900,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][25] = 18, [0][1][1][0][RTW89_UK][1][25] = 54, [0][1][1][0][RTW89_UK][0][25] = 18, + [0][1][1][0][RTW89_THAILAND][1][25] = 42, + [0][1][1][0][RTW89_THAILAND][0][25] = -4, [0][1][1][0][RTW89_FCC][1][27] = -4, [0][1][1][0][RTW89_FCC][2][27] = 68, [0][1][1][0][RTW89_ETSI][1][27] = 54, @@ -39250,6 +39909,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][27] = 54, [0][1][1][0][RTW89_MKK][0][27] = 16, [0][1][1][0][RTW89_IC][1][27] = -4, + [0][1][1][0][RTW89_IC][2][27] = 68, [0][1][1][0][RTW89_KCC][1][27] = 12, [0][1][1][0][RTW89_KCC][0][27] = 14, [0][1][1][0][RTW89_ACMA][1][27] = 54, @@ -39259,6 +39919,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][27] = 18, [0][1][1][0][RTW89_UK][1][27] = 54, [0][1][1][0][RTW89_UK][0][27] = 18, + [0][1][1][0][RTW89_THAILAND][1][27] = 42, + [0][1][1][0][RTW89_THAILAND][0][27] = -4, [0][1][1][0][RTW89_FCC][1][29] = -4, [0][1][1][0][RTW89_FCC][2][29] = 68, [0][1][1][0][RTW89_ETSI][1][29] = 54, @@ -39266,6 +39928,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][29] = 54, [0][1][1][0][RTW89_MKK][0][29] = 16, [0][1][1][0][RTW89_IC][1][29] = -4, + [0][1][1][0][RTW89_IC][2][29] = 68, [0][1][1][0][RTW89_KCC][1][29] = 12, [0][1][1][0][RTW89_KCC][0][29] = 14, [0][1][1][0][RTW89_ACMA][1][29] = 54, @@ -39275,6 +39938,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][29] = 18, [0][1][1][0][RTW89_UK][1][29] = 54, [0][1][1][0][RTW89_UK][0][29] = 18, + [0][1][1][0][RTW89_THAILAND][1][29] = 42, + [0][1][1][0][RTW89_THAILAND][0][29] = -4, [0][1][1][0][RTW89_FCC][1][30] = -4, [0][1][1][0][RTW89_FCC][2][30] = 68, [0][1][1][0][RTW89_ETSI][1][30] = 54, @@ -39282,6 +39947,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][30] = 54, [0][1][1][0][RTW89_MKK][0][30] = 16, [0][1][1][0][RTW89_IC][1][30] = -4, + [0][1][1][0][RTW89_IC][2][30] = 68, [0][1][1][0][RTW89_KCC][1][30] = 12, [0][1][1][0][RTW89_KCC][0][30] = 14, [0][1][1][0][RTW89_ACMA][1][30] = 54, @@ -39291,6 +39957,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][30] = 18, [0][1][1][0][RTW89_UK][1][30] = 54, [0][1][1][0][RTW89_UK][0][30] = 18, + [0][1][1][0][RTW89_THAILAND][1][30] = 42, + [0][1][1][0][RTW89_THAILAND][0][30] = -4, [0][1][1][0][RTW89_FCC][1][32] = -4, [0][1][1][0][RTW89_FCC][2][32] = 68, [0][1][1][0][RTW89_ETSI][1][32] = 54, @@ -39298,6 +39966,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][32] = 54, [0][1][1][0][RTW89_MKK][0][32] = 16, [0][1][1][0][RTW89_IC][1][32] = -4, + [0][1][1][0][RTW89_IC][2][32] = 68, [0][1][1][0][RTW89_KCC][1][32] = 12, [0][1][1][0][RTW89_KCC][0][32] = 14, [0][1][1][0][RTW89_ACMA][1][32] = 54, @@ -39307,6 +39976,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][32] = 18, [0][1][1][0][RTW89_UK][1][32] = 54, [0][1][1][0][RTW89_UK][0][32] = 18, + [0][1][1][0][RTW89_THAILAND][1][32] = 42, + [0][1][1][0][RTW89_THAILAND][0][32] = -4, [0][1][1][0][RTW89_FCC][1][34] = -4, [0][1][1][0][RTW89_FCC][2][34] = 68, [0][1][1][0][RTW89_ETSI][1][34] = 54, @@ -39314,6 +39985,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][34] = 54, [0][1][1][0][RTW89_MKK][0][34] = 16, [0][1][1][0][RTW89_IC][1][34] = -4, + [0][1][1][0][RTW89_IC][2][34] = 68, [0][1][1][0][RTW89_KCC][1][34] = 12, [0][1][1][0][RTW89_KCC][0][34] = 14, [0][1][1][0][RTW89_ACMA][1][34] = 54, @@ -39323,6 +39995,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][34] = 18, [0][1][1][0][RTW89_UK][1][34] = 54, [0][1][1][0][RTW89_UK][0][34] = 18, + [0][1][1][0][RTW89_THAILAND][1][34] = 42, + [0][1][1][0][RTW89_THAILAND][0][34] = -4, [0][1][1][0][RTW89_FCC][1][36] = -4, [0][1][1][0][RTW89_FCC][2][36] = 68, [0][1][1][0][RTW89_ETSI][1][36] = 54, @@ -39330,6 +40004,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][36] = 54, [0][1][1][0][RTW89_MKK][0][36] = 16, [0][1][1][0][RTW89_IC][1][36] = -4, + [0][1][1][0][RTW89_IC][2][36] = 68, [0][1][1][0][RTW89_KCC][1][36] = 12, [0][1][1][0][RTW89_KCC][0][36] = 14, [0][1][1][0][RTW89_ACMA][1][36] = 54, @@ -39339,6 +40014,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][36] = 18, [0][1][1][0][RTW89_UK][1][36] = 54, [0][1][1][0][RTW89_UK][0][36] = 18, + 
[0][1][1][0][RTW89_THAILAND][1][36] = 42, + [0][1][1][0][RTW89_THAILAND][0][36] = -4, [0][1][1][0][RTW89_FCC][1][38] = -4, [0][1][1][0][RTW89_FCC][2][38] = 68, [0][1][1][0][RTW89_ETSI][1][38] = 54, @@ -39346,6 +40023,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][38] = 54, [0][1][1][0][RTW89_MKK][0][38] = 16, [0][1][1][0][RTW89_IC][1][38] = -4, + [0][1][1][0][RTW89_IC][2][38] = 68, [0][1][1][0][RTW89_KCC][1][38] = 12, [0][1][1][0][RTW89_KCC][0][38] = 14, [0][1][1][0][RTW89_ACMA][1][38] = 54, @@ -39355,6 +40033,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][38] = 18, [0][1][1][0][RTW89_UK][1][38] = 54, [0][1][1][0][RTW89_UK][0][38] = 18, + [0][1][1][0][RTW89_THAILAND][1][38] = 42, + [0][1][1][0][RTW89_THAILAND][0][38] = -4, [0][1][1][0][RTW89_FCC][1][40] = -4, [0][1][1][0][RTW89_FCC][2][40] = 68, [0][1][1][0][RTW89_ETSI][1][40] = 54, @@ -39362,6 +40042,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][40] = 54, [0][1][1][0][RTW89_MKK][0][40] = 16, [0][1][1][0][RTW89_IC][1][40] = -4, + [0][1][1][0][RTW89_IC][2][40] = 68, [0][1][1][0][RTW89_KCC][1][40] = 12, [0][1][1][0][RTW89_KCC][0][40] = 14, [0][1][1][0][RTW89_ACMA][1][40] = 54, @@ -39371,6 +40052,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][40] = 18, [0][1][1][0][RTW89_UK][1][40] = 54, [0][1][1][0][RTW89_UK][0][40] = 18, + [0][1][1][0][RTW89_THAILAND][1][40] = 42, + [0][1][1][0][RTW89_THAILAND][0][40] = -4, [0][1][1][0][RTW89_FCC][1][42] = -4, [0][1][1][0][RTW89_FCC][2][42] = 68, [0][1][1][0][RTW89_ETSI][1][42] = 54, @@ -39378,6 +40061,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][42] = 54, [0][1][1][0][RTW89_MKK][0][42] = 16, [0][1][1][0][RTW89_IC][1][42] = -4, + [0][1][1][0][RTW89_IC][2][42] = 68, [0][1][1][0][RTW89_KCC][1][42] = 12, [0][1][1][0][RTW89_KCC][0][42] = 14, [0][1][1][0][RTW89_ACMA][1][42] = 54, @@ -39387,6 +40071,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][42] = 18, [0][1][1][0][RTW89_UK][1][42] = 54, [0][1][1][0][RTW89_UK][0][42] = 18, + [0][1][1][0][RTW89_THAILAND][1][42] = 42, + [0][1][1][0][RTW89_THAILAND][0][42] = -4, [0][1][1][0][RTW89_FCC][1][44] = -2, [0][1][1][0][RTW89_FCC][2][44] = 68, [0][1][1][0][RTW89_ETSI][1][44] = 54, @@ -39394,6 +40080,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][44] = 34, [0][1][1][0][RTW89_MKK][0][44] = 16, [0][1][1][0][RTW89_IC][1][44] = -2, + [0][1][1][0][RTW89_IC][2][44] = 68, [0][1][1][0][RTW89_KCC][1][44] = 12, [0][1][1][0][RTW89_KCC][0][44] = 12, [0][1][1][0][RTW89_ACMA][1][44] = 54, @@ -39403,6 +40090,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][44] = 18, [0][1][1][0][RTW89_UK][1][44] = 54, [0][1][1][0][RTW89_UK][0][44] = 18, + [0][1][1][0][RTW89_THAILAND][1][44] = 42, + [0][1][1][0][RTW89_THAILAND][0][44] = -2, [0][1][1][0][RTW89_FCC][1][45] = -2, [0][1][1][0][RTW89_FCC][2][45] = 127, [0][1][1][0][RTW89_ETSI][1][45] = 127, @@ -39410,6 +40099,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][45] = 127, [0][1][1][0][RTW89_MKK][0][45] = 127, [0][1][1][0][RTW89_IC][1][45] = -2, + [0][1][1][0][RTW89_IC][2][45] = 70, [0][1][1][0][RTW89_KCC][1][45] = 12, [0][1][1][0][RTW89_KCC][0][45] = 127, [0][1][1][0][RTW89_ACMA][1][45] = 
127, @@ -39419,6 +40109,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][45] = 127, [0][1][1][0][RTW89_UK][1][45] = 127, [0][1][1][0][RTW89_UK][0][45] = 127, + [0][1][1][0][RTW89_THAILAND][1][45] = 127, + [0][1][1][0][RTW89_THAILAND][0][45] = 127, [0][1][1][0][RTW89_FCC][1][47] = -2, [0][1][1][0][RTW89_FCC][2][47] = 127, [0][1][1][0][RTW89_ETSI][1][47] = 127, @@ -39426,6 +40118,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][47] = 127, [0][1][1][0][RTW89_MKK][0][47] = 127, [0][1][1][0][RTW89_IC][1][47] = -2, + [0][1][1][0][RTW89_IC][2][47] = 68, [0][1][1][0][RTW89_KCC][1][47] = 12, [0][1][1][0][RTW89_KCC][0][47] = 127, [0][1][1][0][RTW89_ACMA][1][47] = 127, @@ -39435,6 +40128,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][47] = 127, [0][1][1][0][RTW89_UK][1][47] = 127, [0][1][1][0][RTW89_UK][0][47] = 127, + [0][1][1][0][RTW89_THAILAND][1][47] = 127, + [0][1][1][0][RTW89_THAILAND][0][47] = 127, [0][1][1][0][RTW89_FCC][1][49] = -2, [0][1][1][0][RTW89_FCC][2][49] = 127, [0][1][1][0][RTW89_ETSI][1][49] = 127, @@ -39442,6 +40137,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][49] = 127, [0][1][1][0][RTW89_MKK][0][49] = 127, [0][1][1][0][RTW89_IC][1][49] = -2, + [0][1][1][0][RTW89_IC][2][49] = 68, [0][1][1][0][RTW89_KCC][1][49] = 12, [0][1][1][0][RTW89_KCC][0][49] = 127, [0][1][1][0][RTW89_ACMA][1][49] = 127, @@ -39451,6 +40147,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][49] = 127, [0][1][1][0][RTW89_UK][1][49] = 127, [0][1][1][0][RTW89_UK][0][49] = 127, + [0][1][1][0][RTW89_THAILAND][1][49] = 127, + [0][1][1][0][RTW89_THAILAND][0][49] = 127, [0][1][1][0][RTW89_FCC][1][51] = -2, [0][1][1][0][RTW89_FCC][2][51] = 127, [0][1][1][0][RTW89_ETSI][1][51] = 127, @@ -39458,6 +40156,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][51] = 127, [0][1][1][0][RTW89_MKK][0][51] = 127, [0][1][1][0][RTW89_IC][1][51] = -2, + [0][1][1][0][RTW89_IC][2][51] = 68, [0][1][1][0][RTW89_KCC][1][51] = 12, [0][1][1][0][RTW89_KCC][0][51] = 127, [0][1][1][0][RTW89_ACMA][1][51] = 127, @@ -39467,6 +40166,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][51] = 127, [0][1][1][0][RTW89_UK][1][51] = 127, [0][1][1][0][RTW89_UK][0][51] = 127, + [0][1][1][0][RTW89_THAILAND][1][51] = 127, + [0][1][1][0][RTW89_THAILAND][0][51] = 127, [0][1][1][0][RTW89_FCC][1][53] = -2, [0][1][1][0][RTW89_FCC][2][53] = 127, [0][1][1][0][RTW89_ETSI][1][53] = 127, @@ -39474,6 +40175,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][53] = 127, [0][1][1][0][RTW89_MKK][0][53] = 127, [0][1][1][0][RTW89_IC][1][53] = -2, + [0][1][1][0][RTW89_IC][2][53] = 68, [0][1][1][0][RTW89_KCC][1][53] = 12, [0][1][1][0][RTW89_KCC][0][53] = 127, [0][1][1][0][RTW89_ACMA][1][53] = 127, @@ -39483,6 +40185,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][53] = 127, [0][1][1][0][RTW89_UK][1][53] = 127, [0][1][1][0][RTW89_UK][0][53] = 127, + [0][1][1][0][RTW89_THAILAND][1][53] = 127, + [0][1][1][0][RTW89_THAILAND][0][53] = 127, [0][1][1][0][RTW89_FCC][1][55] = -2, [0][1][1][0][RTW89_FCC][2][55] = 68, [0][1][1][0][RTW89_ETSI][1][55] = 127, @@ -39490,6 +40194,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_MKK][1][55] = 127, [0][1][1][0][RTW89_MKK][0][55] = 127, [0][1][1][0][RTW89_IC][1][55] = -2, + [0][1][1][0][RTW89_IC][2][55] = 68, [0][1][1][0][RTW89_KCC][1][55] = 12, [0][1][1][0][RTW89_KCC][0][55] = 127, [0][1][1][0][RTW89_ACMA][1][55] = 127, @@ -39499,6 +40204,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][55] = 127, [0][1][1][0][RTW89_UK][1][55] = 127, [0][1][1][0][RTW89_UK][0][55] = 127, + [0][1][1][0][RTW89_THAILAND][1][55] = 127, + [0][1][1][0][RTW89_THAILAND][0][55] = 127, [0][1][1][0][RTW89_FCC][1][57] = -2, [0][1][1][0][RTW89_FCC][2][57] = 68, [0][1][1][0][RTW89_ETSI][1][57] = 127, @@ -39506,6 +40213,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][57] = 127, [0][1][1][0][RTW89_MKK][0][57] = 127, [0][1][1][0][RTW89_IC][1][57] = -2, + [0][1][1][0][RTW89_IC][2][57] = 68, [0][1][1][0][RTW89_KCC][1][57] = 12, [0][1][1][0][RTW89_KCC][0][57] = 127, [0][1][1][0][RTW89_ACMA][1][57] = 127, @@ -39515,6 +40223,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][57] = 127, [0][1][1][0][RTW89_UK][1][57] = 127, [0][1][1][0][RTW89_UK][0][57] = 127, + [0][1][1][0][RTW89_THAILAND][1][57] = 127, + [0][1][1][0][RTW89_THAILAND][0][57] = 127, [0][1][1][0][RTW89_FCC][1][59] = -2, [0][1][1][0][RTW89_FCC][2][59] = 68, [0][1][1][0][RTW89_ETSI][1][59] = 127, @@ -39522,6 +40232,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][59] = 127, [0][1][1][0][RTW89_MKK][0][59] = 127, [0][1][1][0][RTW89_IC][1][59] = -2, + [0][1][1][0][RTW89_IC][2][59] = 68, [0][1][1][0][RTW89_KCC][1][59] = 12, [0][1][1][0][RTW89_KCC][0][59] = 127, [0][1][1][0][RTW89_ACMA][1][59] = 127, @@ -39531,6 +40242,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][59] = 127, [0][1][1][0][RTW89_UK][1][59] = 127, [0][1][1][0][RTW89_UK][0][59] = 127, + [0][1][1][0][RTW89_THAILAND][1][59] = 127, + [0][1][1][0][RTW89_THAILAND][0][59] = 127, [0][1][1][0][RTW89_FCC][1][60] = -2, [0][1][1][0][RTW89_FCC][2][60] = 68, [0][1][1][0][RTW89_ETSI][1][60] = 127, @@ -39538,6 +40251,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][60] = 127, [0][1][1][0][RTW89_MKK][0][60] = 127, [0][1][1][0][RTW89_IC][1][60] = -2, + [0][1][1][0][RTW89_IC][2][60] = 68, [0][1][1][0][RTW89_KCC][1][60] = 12, [0][1][1][0][RTW89_KCC][0][60] = 127, [0][1][1][0][RTW89_ACMA][1][60] = 127, @@ -39547,6 +40261,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][60] = 127, [0][1][1][0][RTW89_UK][1][60] = 127, [0][1][1][0][RTW89_UK][0][60] = 127, + [0][1][1][0][RTW89_THAILAND][1][60] = 127, + [0][1][1][0][RTW89_THAILAND][0][60] = 127, [0][1][1][0][RTW89_FCC][1][62] = -2, [0][1][1][0][RTW89_FCC][2][62] = 68, [0][1][1][0][RTW89_ETSI][1][62] = 127, @@ -39554,6 +40270,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][62] = 127, [0][1][1][0][RTW89_MKK][0][62] = 127, [0][1][1][0][RTW89_IC][1][62] = -2, + [0][1][1][0][RTW89_IC][2][62] = 68, [0][1][1][0][RTW89_KCC][1][62] = 12, [0][1][1][0][RTW89_KCC][0][62] = 127, [0][1][1][0][RTW89_ACMA][1][62] = 127, @@ -39563,6 +40280,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][62] = 127, [0][1][1][0][RTW89_UK][1][62] = 127, [0][1][1][0][RTW89_UK][0][62] = 127, + [0][1][1][0][RTW89_THAILAND][1][62] = 
127, + [0][1][1][0][RTW89_THAILAND][0][62] = 127, [0][1][1][0][RTW89_FCC][1][64] = -2, [0][1][1][0][RTW89_FCC][2][64] = 68, [0][1][1][0][RTW89_ETSI][1][64] = 127, @@ -39570,6 +40289,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][64] = 127, [0][1][1][0][RTW89_MKK][0][64] = 127, [0][1][1][0][RTW89_IC][1][64] = -2, + [0][1][1][0][RTW89_IC][2][64] = 68, [0][1][1][0][RTW89_KCC][1][64] = 12, [0][1][1][0][RTW89_KCC][0][64] = 127, [0][1][1][0][RTW89_ACMA][1][64] = 127, @@ -39579,6 +40299,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][64] = 127, [0][1][1][0][RTW89_UK][1][64] = 127, [0][1][1][0][RTW89_UK][0][64] = 127, + [0][1][1][0][RTW89_THAILAND][1][64] = 127, + [0][1][1][0][RTW89_THAILAND][0][64] = 127, [0][1][1][0][RTW89_FCC][1][66] = -2, [0][1][1][0][RTW89_FCC][2][66] = 68, [0][1][1][0][RTW89_ETSI][1][66] = 127, @@ -39586,6 +40308,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][66] = 127, [0][1][1][0][RTW89_MKK][0][66] = 127, [0][1][1][0][RTW89_IC][1][66] = -2, + [0][1][1][0][RTW89_IC][2][66] = 68, [0][1][1][0][RTW89_KCC][1][66] = 12, [0][1][1][0][RTW89_KCC][0][66] = 127, [0][1][1][0][RTW89_ACMA][1][66] = 127, @@ -39595,6 +40318,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][66] = 127, [0][1][1][0][RTW89_UK][1][66] = 127, [0][1][1][0][RTW89_UK][0][66] = 127, + [0][1][1][0][RTW89_THAILAND][1][66] = 127, + [0][1][1][0][RTW89_THAILAND][0][66] = 127, [0][1][1][0][RTW89_FCC][1][68] = -2, [0][1][1][0][RTW89_FCC][2][68] = 68, [0][1][1][0][RTW89_ETSI][1][68] = 127, @@ -39602,6 +40327,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][68] = 127, [0][1][1][0][RTW89_MKK][0][68] = 127, [0][1][1][0][RTW89_IC][1][68] = -2, + [0][1][1][0][RTW89_IC][2][68] = 68, [0][1][1][0][RTW89_KCC][1][68] = 12, [0][1][1][0][RTW89_KCC][0][68] = 127, [0][1][1][0][RTW89_ACMA][1][68] = 127, @@ -39611,6 +40337,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][68] = 127, [0][1][1][0][RTW89_UK][1][68] = 127, [0][1][1][0][RTW89_UK][0][68] = 127, + [0][1][1][0][RTW89_THAILAND][1][68] = 127, + [0][1][1][0][RTW89_THAILAND][0][68] = 127, [0][1][1][0][RTW89_FCC][1][70] = -2, [0][1][1][0][RTW89_FCC][2][70] = 68, [0][1][1][0][RTW89_ETSI][1][70] = 127, @@ -39618,6 +40346,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][70] = 127, [0][1][1][0][RTW89_MKK][0][70] = 127, [0][1][1][0][RTW89_IC][1][70] = -2, + [0][1][1][0][RTW89_IC][2][70] = 68, [0][1][1][0][RTW89_KCC][1][70] = 12, [0][1][1][0][RTW89_KCC][0][70] = 127, [0][1][1][0][RTW89_ACMA][1][70] = 127, @@ -39627,6 +40356,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][70] = 127, [0][1][1][0][RTW89_UK][1][70] = 127, [0][1][1][0][RTW89_UK][0][70] = 127, + [0][1][1][0][RTW89_THAILAND][1][70] = 127, + [0][1][1][0][RTW89_THAILAND][0][70] = 127, [0][1][1][0][RTW89_FCC][1][72] = -2, [0][1][1][0][RTW89_FCC][2][72] = 68, [0][1][1][0][RTW89_ETSI][1][72] = 127, @@ -39634,6 +40365,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][72] = 127, [0][1][1][0][RTW89_MKK][0][72] = 127, [0][1][1][0][RTW89_IC][1][72] = -2, + [0][1][1][0][RTW89_IC][2][72] = 68, [0][1][1][0][RTW89_KCC][1][72] = 12, [0][1][1][0][RTW89_KCC][0][72] = 127, [0][1][1][0][RTW89_ACMA][1][72] 
= 127, @@ -39643,6 +40375,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][72] = 127, [0][1][1][0][RTW89_UK][1][72] = 127, [0][1][1][0][RTW89_UK][0][72] = 127, + [0][1][1][0][RTW89_THAILAND][1][72] = 127, + [0][1][1][0][RTW89_THAILAND][0][72] = 127, [0][1][1][0][RTW89_FCC][1][74] = -2, [0][1][1][0][RTW89_FCC][2][74] = 68, [0][1][1][0][RTW89_ETSI][1][74] = 127, @@ -39650,6 +40384,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][74] = 127, [0][1][1][0][RTW89_MKK][0][74] = 127, [0][1][1][0][RTW89_IC][1][74] = -2, + [0][1][1][0][RTW89_IC][2][74] = 68, [0][1][1][0][RTW89_KCC][1][74] = 12, [0][1][1][0][RTW89_KCC][0][74] = 127, [0][1][1][0][RTW89_ACMA][1][74] = 127, @@ -39659,6 +40394,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][74] = 127, [0][1][1][0][RTW89_UK][1][74] = 127, [0][1][1][0][RTW89_UK][0][74] = 127, + [0][1][1][0][RTW89_THAILAND][1][74] = 127, + [0][1][1][0][RTW89_THAILAND][0][74] = 127, [0][1][1][0][RTW89_FCC][1][75] = -2, [0][1][1][0][RTW89_FCC][2][75] = 68, [0][1][1][0][RTW89_ETSI][1][75] = 127, @@ -39666,6 +40403,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][75] = 127, [0][1][1][0][RTW89_MKK][0][75] = 127, [0][1][1][0][RTW89_IC][1][75] = -2, + [0][1][1][0][RTW89_IC][2][75] = 68, [0][1][1][0][RTW89_KCC][1][75] = 12, [0][1][1][0][RTW89_KCC][0][75] = 127, [0][1][1][0][RTW89_ACMA][1][75] = 127, @@ -39675,6 +40413,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][75] = 127, [0][1][1][0][RTW89_UK][1][75] = 127, [0][1][1][0][RTW89_UK][0][75] = 127, + [0][1][1][0][RTW89_THAILAND][1][75] = 127, + [0][1][1][0][RTW89_THAILAND][0][75] = 127, [0][1][1][0][RTW89_FCC][1][77] = -2, [0][1][1][0][RTW89_FCC][2][77] = 68, [0][1][1][0][RTW89_ETSI][1][77] = 127, @@ -39682,6 +40422,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][77] = 127, [0][1][1][0][RTW89_MKK][0][77] = 127, [0][1][1][0][RTW89_IC][1][77] = -2, + [0][1][1][0][RTW89_IC][2][77] = 68, [0][1][1][0][RTW89_KCC][1][77] = 12, [0][1][1][0][RTW89_KCC][0][77] = 127, [0][1][1][0][RTW89_ACMA][1][77] = 127, @@ -39691,6 +40432,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][77] = 127, [0][1][1][0][RTW89_UK][1][77] = 127, [0][1][1][0][RTW89_UK][0][77] = 127, + [0][1][1][0][RTW89_THAILAND][1][77] = 127, + [0][1][1][0][RTW89_THAILAND][0][77] = 127, [0][1][1][0][RTW89_FCC][1][79] = -2, [0][1][1][0][RTW89_FCC][2][79] = 68, [0][1][1][0][RTW89_ETSI][1][79] = 127, @@ -39698,6 +40441,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][79] = 127, [0][1][1][0][RTW89_MKK][0][79] = 127, [0][1][1][0][RTW89_IC][1][79] = -2, + [0][1][1][0][RTW89_IC][2][79] = 68, [0][1][1][0][RTW89_KCC][1][79] = 12, [0][1][1][0][RTW89_KCC][0][79] = 127, [0][1][1][0][RTW89_ACMA][1][79] = 127, @@ -39707,6 +40451,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][79] = 127, [0][1][1][0][RTW89_UK][1][79] = 127, [0][1][1][0][RTW89_UK][0][79] = 127, + [0][1][1][0][RTW89_THAILAND][1][79] = 127, + [0][1][1][0][RTW89_THAILAND][0][79] = 127, [0][1][1][0][RTW89_FCC][1][81] = -2, [0][1][1][0][RTW89_FCC][2][81] = 68, [0][1][1][0][RTW89_ETSI][1][81] = 127, @@ -39714,6 +40460,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_MKK][1][81] = 127, [0][1][1][0][RTW89_MKK][0][81] = 127, [0][1][1][0][RTW89_IC][1][81] = -2, + [0][1][1][0][RTW89_IC][2][81] = 68, [0][1][1][0][RTW89_KCC][1][81] = 12, [0][1][1][0][RTW89_KCC][0][81] = 127, [0][1][1][0][RTW89_ACMA][1][81] = 127, @@ -39723,6 +40470,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][81] = 127, [0][1][1][0][RTW89_UK][1][81] = 127, [0][1][1][0][RTW89_UK][0][81] = 127, + [0][1][1][0][RTW89_THAILAND][1][81] = 127, + [0][1][1][0][RTW89_THAILAND][0][81] = 127, [0][1][1][0][RTW89_FCC][1][83] = -2, [0][1][1][0][RTW89_FCC][2][83] = 68, [0][1][1][0][RTW89_ETSI][1][83] = 127, @@ -39730,6 +40479,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][83] = 127, [0][1][1][0][RTW89_MKK][0][83] = 127, [0][1][1][0][RTW89_IC][1][83] = -2, + [0][1][1][0][RTW89_IC][2][83] = 68, [0][1][1][0][RTW89_KCC][1][83] = 20, [0][1][1][0][RTW89_KCC][0][83] = 127, [0][1][1][0][RTW89_ACMA][1][83] = 127, @@ -39739,6 +40489,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][83] = 127, [0][1][1][0][RTW89_UK][1][83] = 127, [0][1][1][0][RTW89_UK][0][83] = 127, + [0][1][1][0][RTW89_THAILAND][1][83] = 127, + [0][1][1][0][RTW89_THAILAND][0][83] = 127, [0][1][1][0][RTW89_FCC][1][85] = -2, [0][1][1][0][RTW89_FCC][2][85] = 68, [0][1][1][0][RTW89_ETSI][1][85] = 127, @@ -39746,6 +40498,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][85] = 127, [0][1][1][0][RTW89_MKK][0][85] = 127, [0][1][1][0][RTW89_IC][1][85] = -2, + [0][1][1][0][RTW89_IC][2][85] = 68, [0][1][1][0][RTW89_KCC][1][85] = 20, [0][1][1][0][RTW89_KCC][0][85] = 127, [0][1][1][0][RTW89_ACMA][1][85] = 127, @@ -39755,6 +40508,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][85] = 127, [0][1][1][0][RTW89_UK][1][85] = 127, [0][1][1][0][RTW89_UK][0][85] = 127, + [0][1][1][0][RTW89_THAILAND][1][85] = 127, + [0][1][1][0][RTW89_THAILAND][0][85] = 127, [0][1][1][0][RTW89_FCC][1][87] = -2, [0][1][1][0][RTW89_FCC][2][87] = 127, [0][1][1][0][RTW89_ETSI][1][87] = 127, @@ -39762,6 +40517,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][87] = 127, [0][1][1][0][RTW89_MKK][0][87] = 127, [0][1][1][0][RTW89_IC][1][87] = -2, + [0][1][1][0][RTW89_IC][2][87] = 127, [0][1][1][0][RTW89_KCC][1][87] = 20, [0][1][1][0][RTW89_KCC][0][87] = 127, [0][1][1][0][RTW89_ACMA][1][87] = 127, @@ -39771,6 +40527,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][87] = 127, [0][1][1][0][RTW89_UK][1][87] = 127, [0][1][1][0][RTW89_UK][0][87] = 127, + [0][1][1][0][RTW89_THAILAND][1][87] = 127, + [0][1][1][0][RTW89_THAILAND][0][87] = 127, [0][1][1][0][RTW89_FCC][1][89] = -2, [0][1][1][0][RTW89_FCC][2][89] = 127, [0][1][1][0][RTW89_ETSI][1][89] = 127, @@ -39778,6 +40536,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][89] = 127, [0][1][1][0][RTW89_MKK][0][89] = 127, [0][1][1][0][RTW89_IC][1][89] = -2, + [0][1][1][0][RTW89_IC][2][89] = 127, [0][1][1][0][RTW89_KCC][1][89] = 20, [0][1][1][0][RTW89_KCC][0][89] = 127, [0][1][1][0][RTW89_ACMA][1][89] = 127, @@ -39787,6 +40546,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][89] = 127, [0][1][1][0][RTW89_UK][1][89] = 127, [0][1][1][0][RTW89_UK][0][89] = 127, + [0][1][1][0][RTW89_THAILAND][1][89] 
= 127, + [0][1][1][0][RTW89_THAILAND][0][89] = 127, [0][1][1][0][RTW89_FCC][1][90] = -2, [0][1][1][0][RTW89_FCC][2][90] = 127, [0][1][1][0][RTW89_ETSI][1][90] = 127, @@ -39794,6 +40555,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][90] = 127, [0][1][1][0][RTW89_MKK][0][90] = 127, [0][1][1][0][RTW89_IC][1][90] = -2, + [0][1][1][0][RTW89_IC][2][90] = 127, [0][1][1][0][RTW89_KCC][1][90] = 20, [0][1][1][0][RTW89_KCC][0][90] = 127, [0][1][1][0][RTW89_ACMA][1][90] = 127, @@ -39803,6 +40565,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][90] = 127, [0][1][1][0][RTW89_UK][1][90] = 127, [0][1][1][0][RTW89_UK][0][90] = 127, + [0][1][1][0][RTW89_THAILAND][1][90] = 127, + [0][1][1][0][RTW89_THAILAND][0][90] = 127, [0][1][1][0][RTW89_FCC][1][92] = -2, [0][1][1][0][RTW89_FCC][2][92] = 127, [0][1][1][0][RTW89_ETSI][1][92] = 127, @@ -39810,6 +40574,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][92] = 127, [0][1][1][0][RTW89_MKK][0][92] = 127, [0][1][1][0][RTW89_IC][1][92] = -2, + [0][1][1][0][RTW89_IC][2][92] = 127, [0][1][1][0][RTW89_KCC][1][92] = 20, [0][1][1][0][RTW89_KCC][0][92] = 127, [0][1][1][0][RTW89_ACMA][1][92] = 127, @@ -39819,6 +40584,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][92] = 127, [0][1][1][0][RTW89_UK][1][92] = 127, [0][1][1][0][RTW89_UK][0][92] = 127, + [0][1][1][0][RTW89_THAILAND][1][92] = 127, + [0][1][1][0][RTW89_THAILAND][0][92] = 127, [0][1][1][0][RTW89_FCC][1][94] = -2, [0][1][1][0][RTW89_FCC][2][94] = 127, [0][1][1][0][RTW89_ETSI][1][94] = 127, @@ -39826,6 +40593,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][94] = 127, [0][1][1][0][RTW89_MKK][0][94] = 127, [0][1][1][0][RTW89_IC][1][94] = -2, + [0][1][1][0][RTW89_IC][2][94] = 127, [0][1][1][0][RTW89_KCC][1][94] = 20, [0][1][1][0][RTW89_KCC][0][94] = 127, [0][1][1][0][RTW89_ACMA][1][94] = 127, @@ -39835,6 +40603,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][94] = 127, [0][1][1][0][RTW89_UK][1][94] = 127, [0][1][1][0][RTW89_UK][0][94] = 127, + [0][1][1][0][RTW89_THAILAND][1][94] = 127, + [0][1][1][0][RTW89_THAILAND][0][94] = 127, [0][1][1][0][RTW89_FCC][1][96] = -2, [0][1][1][0][RTW89_FCC][2][96] = 127, [0][1][1][0][RTW89_ETSI][1][96] = 127, @@ -39842,6 +40612,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][96] = 127, [0][1][1][0][RTW89_MKK][0][96] = 127, [0][1][1][0][RTW89_IC][1][96] = -2, + [0][1][1][0][RTW89_IC][2][96] = 127, [0][1][1][0][RTW89_KCC][1][96] = 20, [0][1][1][0][RTW89_KCC][0][96] = 127, [0][1][1][0][RTW89_ACMA][1][96] = 127, @@ -39851,6 +40622,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][96] = 127, [0][1][1][0][RTW89_UK][1][96] = 127, [0][1][1][0][RTW89_UK][0][96] = 127, + [0][1][1][0][RTW89_THAILAND][1][96] = 127, + [0][1][1][0][RTW89_THAILAND][0][96] = 127, [0][1][1][0][RTW89_FCC][1][98] = -2, [0][1][1][0][RTW89_FCC][2][98] = 127, [0][1][1][0][RTW89_ETSI][1][98] = 127, @@ -39858,6 +40631,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][98] = 127, [0][1][1][0][RTW89_MKK][0][98] = 127, [0][1][1][0][RTW89_IC][1][98] = -2, + [0][1][1][0][RTW89_IC][2][98] = 127, [0][1][1][0][RTW89_KCC][1][98] = 20, [0][1][1][0][RTW89_KCC][0][98] = 127, 
[0][1][1][0][RTW89_ACMA][1][98] = 127, @@ -39867,6 +40641,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][98] = 127, [0][1][1][0][RTW89_UK][1][98] = 127, [0][1][1][0][RTW89_UK][0][98] = 127, + [0][1][1][0][RTW89_THAILAND][1][98] = 127, + [0][1][1][0][RTW89_THAILAND][0][98] = 127, [0][1][1][0][RTW89_FCC][1][100] = -2, [0][1][1][0][RTW89_FCC][2][100] = 127, [0][1][1][0][RTW89_ETSI][1][100] = 127, @@ -39874,6 +40650,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][100] = 127, [0][1][1][0][RTW89_MKK][0][100] = 127, [0][1][1][0][RTW89_IC][1][100] = -2, + [0][1][1][0][RTW89_IC][2][100] = 127, [0][1][1][0][RTW89_KCC][1][100] = 20, [0][1][1][0][RTW89_KCC][0][100] = 127, [0][1][1][0][RTW89_ACMA][1][100] = 127, @@ -39883,6 +40660,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][100] = 127, [0][1][1][0][RTW89_UK][1][100] = 127, [0][1][1][0][RTW89_UK][0][100] = 127, + [0][1][1][0][RTW89_THAILAND][1][100] = 127, + [0][1][1][0][RTW89_THAILAND][0][100] = 127, [0][1][1][0][RTW89_FCC][1][102] = -2, [0][1][1][0][RTW89_FCC][2][102] = 127, [0][1][1][0][RTW89_ETSI][1][102] = 127, @@ -39890,6 +40669,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][102] = 127, [0][1][1][0][RTW89_MKK][0][102] = 127, [0][1][1][0][RTW89_IC][1][102] = -2, + [0][1][1][0][RTW89_IC][2][102] = 127, [0][1][1][0][RTW89_KCC][1][102] = 20, [0][1][1][0][RTW89_KCC][0][102] = 127, [0][1][1][0][RTW89_ACMA][1][102] = 127, @@ -39899,6 +40679,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][102] = 127, [0][1][1][0][RTW89_UK][1][102] = 127, [0][1][1][0][RTW89_UK][0][102] = 127, + [0][1][1][0][RTW89_THAILAND][1][102] = 127, + [0][1][1][0][RTW89_THAILAND][0][102] = 127, [0][1][1][0][RTW89_FCC][1][104] = -2, [0][1][1][0][RTW89_FCC][2][104] = 127, [0][1][1][0][RTW89_ETSI][1][104] = 127, @@ -39906,6 +40688,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][104] = 127, [0][1][1][0][RTW89_MKK][0][104] = 127, [0][1][1][0][RTW89_IC][1][104] = -2, + [0][1][1][0][RTW89_IC][2][104] = 127, [0][1][1][0][RTW89_KCC][1][104] = 20, [0][1][1][0][RTW89_KCC][0][104] = 127, [0][1][1][0][RTW89_ACMA][1][104] = 127, @@ -39915,6 +40698,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][104] = 127, [0][1][1][0][RTW89_UK][1][104] = 127, [0][1][1][0][RTW89_UK][0][104] = 127, + [0][1][1][0][RTW89_THAILAND][1][104] = 127, + [0][1][1][0][RTW89_THAILAND][0][104] = 127, [0][1][1][0][RTW89_FCC][1][105] = -2, [0][1][1][0][RTW89_FCC][2][105] = 127, [0][1][1][0][RTW89_ETSI][1][105] = 127, @@ -39922,6 +40707,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][105] = 127, [0][1][1][0][RTW89_MKK][0][105] = 127, [0][1][1][0][RTW89_IC][1][105] = -2, + [0][1][1][0][RTW89_IC][2][105] = 127, [0][1][1][0][RTW89_KCC][1][105] = 20, [0][1][1][0][RTW89_KCC][0][105] = 127, [0][1][1][0][RTW89_ACMA][1][105] = 127, @@ -39931,6 +40717,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][105] = 127, [0][1][1][0][RTW89_UK][1][105] = 127, [0][1][1][0][RTW89_UK][0][105] = 127, + [0][1][1][0][RTW89_THAILAND][1][105] = 127, + [0][1][1][0][RTW89_THAILAND][0][105] = 127, [0][1][1][0][RTW89_FCC][1][107] = 1, [0][1][1][0][RTW89_FCC][2][107] = 127, [0][1][1][0][RTW89_ETSI][1][107] 
= 127, @@ -39938,6 +40726,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][107] = 127, [0][1][1][0][RTW89_MKK][0][107] = 127, [0][1][1][0][RTW89_IC][1][107] = 1, + [0][1][1][0][RTW89_IC][2][107] = 127, [0][1][1][0][RTW89_KCC][1][107] = 20, [0][1][1][0][RTW89_KCC][0][107] = 127, [0][1][1][0][RTW89_ACMA][1][107] = 127, @@ -39947,6 +40736,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][107] = 127, [0][1][1][0][RTW89_UK][1][107] = 127, [0][1][1][0][RTW89_UK][0][107] = 127, + [0][1][1][0][RTW89_THAILAND][1][107] = 127, + [0][1][1][0][RTW89_THAILAND][0][107] = 127, [0][1][1][0][RTW89_FCC][1][109] = 1, [0][1][1][0][RTW89_FCC][2][109] = 127, [0][1][1][0][RTW89_ETSI][1][109] = 127, @@ -39954,6 +40745,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][109] = 127, [0][1][1][0][RTW89_MKK][0][109] = 127, [0][1][1][0][RTW89_IC][1][109] = 1, + [0][1][1][0][RTW89_IC][2][109] = 127, [0][1][1][0][RTW89_KCC][1][109] = 20, [0][1][1][0][RTW89_KCC][0][109] = 127, [0][1][1][0][RTW89_ACMA][1][109] = 127, @@ -39963,6 +40755,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][109] = 127, [0][1][1][0][RTW89_UK][1][109] = 127, [0][1][1][0][RTW89_UK][0][109] = 127, + [0][1][1][0][RTW89_THAILAND][1][109] = 127, + [0][1][1][0][RTW89_THAILAND][0][109] = 127, [0][1][1][0][RTW89_FCC][1][111] = 127, [0][1][1][0][RTW89_FCC][2][111] = 127, [0][1][1][0][RTW89_ETSI][1][111] = 127, @@ -39970,6 +40764,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][111] = 127, [0][1][1][0][RTW89_MKK][0][111] = 127, [0][1][1][0][RTW89_IC][1][111] = 127, + [0][1][1][0][RTW89_IC][2][111] = 127, [0][1][1][0][RTW89_KCC][1][111] = 127, [0][1][1][0][RTW89_KCC][0][111] = 127, [0][1][1][0][RTW89_ACMA][1][111] = 127, @@ -39979,6 +40774,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][111] = 127, [0][1][1][0][RTW89_UK][1][111] = 127, [0][1][1][0][RTW89_UK][0][111] = 127, + [0][1][1][0][RTW89_THAILAND][1][111] = 127, + [0][1][1][0][RTW89_THAILAND][0][111] = 127, [0][1][1][0][RTW89_FCC][1][113] = 127, [0][1][1][0][RTW89_FCC][2][113] = 127, [0][1][1][0][RTW89_ETSI][1][113] = 127, @@ -39986,6 +40783,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][113] = 127, [0][1][1][0][RTW89_MKK][0][113] = 127, [0][1][1][0][RTW89_IC][1][113] = 127, + [0][1][1][0][RTW89_IC][2][113] = 127, [0][1][1][0][RTW89_KCC][1][113] = 127, [0][1][1][0][RTW89_KCC][0][113] = 127, [0][1][1][0][RTW89_ACMA][1][113] = 127, @@ -39995,6 +40793,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][113] = 127, [0][1][1][0][RTW89_UK][1][113] = 127, [0][1][1][0][RTW89_UK][0][113] = 127, + [0][1][1][0][RTW89_THAILAND][1][113] = 127, + [0][1][1][0][RTW89_THAILAND][0][113] = 127, [0][1][1][0][RTW89_FCC][1][115] = 127, [0][1][1][0][RTW89_FCC][2][115] = 127, [0][1][1][0][RTW89_ETSI][1][115] = 127, @@ -40002,6 +40802,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][115] = 127, [0][1][1][0][RTW89_MKK][0][115] = 127, [0][1][1][0][RTW89_IC][1][115] = 127, + [0][1][1][0][RTW89_IC][2][115] = 127, [0][1][1][0][RTW89_KCC][1][115] = 127, [0][1][1][0][RTW89_KCC][0][115] = 127, [0][1][1][0][RTW89_ACMA][1][115] = 127, @@ -40011,6 +40812,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][115] = 127, [0][1][1][0][RTW89_UK][1][115] = 127, [0][1][1][0][RTW89_UK][0][115] = 127, + [0][1][1][0][RTW89_THAILAND][1][115] = 127, + [0][1][1][0][RTW89_THAILAND][0][115] = 127, [0][1][1][0][RTW89_FCC][1][117] = 127, [0][1][1][0][RTW89_FCC][2][117] = 127, [0][1][1][0][RTW89_ETSI][1][117] = 127, @@ -40018,6 +40821,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][117] = 127, [0][1][1][0][RTW89_MKK][0][117] = 127, [0][1][1][0][RTW89_IC][1][117] = 127, + [0][1][1][0][RTW89_IC][2][117] = 127, [0][1][1][0][RTW89_KCC][1][117] = 127, [0][1][1][0][RTW89_KCC][0][117] = 127, [0][1][1][0][RTW89_ACMA][1][117] = 127, @@ -40027,6 +40831,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][117] = 127, [0][1][1][0][RTW89_UK][1][117] = 127, [0][1][1][0][RTW89_UK][0][117] = 127, + [0][1][1][0][RTW89_THAILAND][1][117] = 127, + [0][1][1][0][RTW89_THAILAND][0][117] = 127, [0][1][1][0][RTW89_FCC][1][119] = 127, [0][1][1][0][RTW89_FCC][2][119] = 127, [0][1][1][0][RTW89_ETSI][1][119] = 127, @@ -40034,6 +40840,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][119] = 127, [0][1][1][0][RTW89_MKK][0][119] = 127, [0][1][1][0][RTW89_IC][1][119] = 127, + [0][1][1][0][RTW89_IC][2][119] = 127, [0][1][1][0][RTW89_KCC][1][119] = 127, [0][1][1][0][RTW89_KCC][0][119] = 127, [0][1][1][0][RTW89_ACMA][1][119] = 127, @@ -40043,6 +40850,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][119] = 127, [0][1][1][0][RTW89_UK][1][119] = 127, [0][1][1][0][RTW89_UK][0][119] = 127, + [0][1][1][0][RTW89_THAILAND][1][119] = 127, + [0][1][1][0][RTW89_THAILAND][0][119] = 127, [0][0][2][0][RTW89_FCC][1][0] = 24, [0][0][2][0][RTW89_FCC][2][0] = 56, [0][0][2][0][RTW89_ETSI][1][0] = 66, @@ -40050,6 +40859,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][0] = 66, [0][0][2][0][RTW89_MKK][0][0] = 26, [0][0][2][0][RTW89_IC][1][0] = 24, + [0][0][2][0][RTW89_IC][2][0] = 56, [0][0][2][0][RTW89_KCC][1][0] = 24, [0][0][2][0][RTW89_KCC][0][0] = 24, [0][0][2][0][RTW89_ACMA][1][0] = 66, @@ -40059,6 +40869,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][0] = 28, [0][0][2][0][RTW89_UK][1][0] = 66, [0][0][2][0][RTW89_UK][0][0] = 28, + [0][0][2][0][RTW89_THAILAND][1][0] = 56, + [0][0][2][0][RTW89_THAILAND][0][0] = 24, [0][0][2][0][RTW89_FCC][1][2] = 22, [0][0][2][0][RTW89_FCC][2][2] = 56, [0][0][2][0][RTW89_ETSI][1][2] = 66, @@ -40066,6 +40878,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][2] = 66, [0][0][2][0][RTW89_MKK][0][2] = 26, [0][0][2][0][RTW89_IC][1][2] = 22, + [0][0][2][0][RTW89_IC][2][2] = 56, [0][0][2][0][RTW89_KCC][1][2] = 24, [0][0][2][0][RTW89_KCC][0][2] = 24, [0][0][2][0][RTW89_ACMA][1][2] = 66, @@ -40075,6 +40888,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][2] = 28, [0][0][2][0][RTW89_UK][1][2] = 66, [0][0][2][0][RTW89_UK][0][2] = 28, + [0][0][2][0][RTW89_THAILAND][1][2] = 56, + [0][0][2][0][RTW89_THAILAND][0][2] = 22, [0][0][2][0][RTW89_FCC][1][4] = 22, [0][0][2][0][RTW89_FCC][2][4] = 56, [0][0][2][0][RTW89_ETSI][1][4] = 66, @@ -40082,6 +40897,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][4] = 66, 
[0][0][2][0][RTW89_MKK][0][4] = 26, [0][0][2][0][RTW89_IC][1][4] = 22, + [0][0][2][0][RTW89_IC][2][4] = 56, [0][0][2][0][RTW89_KCC][1][4] = 24, [0][0][2][0][RTW89_KCC][0][4] = 24, [0][0][2][0][RTW89_ACMA][1][4] = 66, @@ -40091,6 +40907,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][4] = 28, [0][0][2][0][RTW89_UK][1][4] = 66, [0][0][2][0][RTW89_UK][0][4] = 28, + [0][0][2][0][RTW89_THAILAND][1][4] = 56, + [0][0][2][0][RTW89_THAILAND][0][4] = 22, [0][0][2][0][RTW89_FCC][1][6] = 22, [0][0][2][0][RTW89_FCC][2][6] = 56, [0][0][2][0][RTW89_ETSI][1][6] = 66, @@ -40098,6 +40916,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][6] = 66, [0][0][2][0][RTW89_MKK][0][6] = 26, [0][0][2][0][RTW89_IC][1][6] = 22, + [0][0][2][0][RTW89_IC][2][6] = 56, [0][0][2][0][RTW89_KCC][1][6] = 24, [0][0][2][0][RTW89_KCC][0][6] = 24, [0][0][2][0][RTW89_ACMA][1][6] = 66, @@ -40107,6 +40926,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][6] = 28, [0][0][2][0][RTW89_UK][1][6] = 66, [0][0][2][0][RTW89_UK][0][6] = 28, + [0][0][2][0][RTW89_THAILAND][1][6] = 56, + [0][0][2][0][RTW89_THAILAND][0][6] = 22, [0][0][2][0][RTW89_FCC][1][8] = 22, [0][0][2][0][RTW89_FCC][2][8] = 56, [0][0][2][0][RTW89_ETSI][1][8] = 66, @@ -40114,6 +40935,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][8] = 66, [0][0][2][0][RTW89_MKK][0][8] = 26, [0][0][2][0][RTW89_IC][1][8] = 22, + [0][0][2][0][RTW89_IC][2][8] = 56, [0][0][2][0][RTW89_KCC][1][8] = 24, [0][0][2][0][RTW89_KCC][0][8] = 24, [0][0][2][0][RTW89_ACMA][1][8] = 66, @@ -40123,6 +40945,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][8] = 28, [0][0][2][0][RTW89_UK][1][8] = 66, [0][0][2][0][RTW89_UK][0][8] = 28, + [0][0][2][0][RTW89_THAILAND][1][8] = 56, + [0][0][2][0][RTW89_THAILAND][0][8] = 22, [0][0][2][0][RTW89_FCC][1][10] = 22, [0][0][2][0][RTW89_FCC][2][10] = 56, [0][0][2][0][RTW89_ETSI][1][10] = 66, @@ -40130,6 +40954,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][10] = 66, [0][0][2][0][RTW89_MKK][0][10] = 26, [0][0][2][0][RTW89_IC][1][10] = 22, + [0][0][2][0][RTW89_IC][2][10] = 56, [0][0][2][0][RTW89_KCC][1][10] = 24, [0][0][2][0][RTW89_KCC][0][10] = 24, [0][0][2][0][RTW89_ACMA][1][10] = 66, @@ -40139,6 +40964,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][10] = 28, [0][0][2][0][RTW89_UK][1][10] = 66, [0][0][2][0][RTW89_UK][0][10] = 28, + [0][0][2][0][RTW89_THAILAND][1][10] = 56, + [0][0][2][0][RTW89_THAILAND][0][10] = 22, [0][0][2][0][RTW89_FCC][1][12] = 22, [0][0][2][0][RTW89_FCC][2][12] = 56, [0][0][2][0][RTW89_ETSI][1][12] = 66, @@ -40146,6 +40973,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][12] = 66, [0][0][2][0][RTW89_MKK][0][12] = 26, [0][0][2][0][RTW89_IC][1][12] = 22, + [0][0][2][0][RTW89_IC][2][12] = 56, [0][0][2][0][RTW89_KCC][1][12] = 24, [0][0][2][0][RTW89_KCC][0][12] = 24, [0][0][2][0][RTW89_ACMA][1][12] = 66, @@ -40155,6 +40983,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][12] = 28, [0][0][2][0][RTW89_UK][1][12] = 66, [0][0][2][0][RTW89_UK][0][12] = 28, + [0][0][2][0][RTW89_THAILAND][1][12] = 56, + [0][0][2][0][RTW89_THAILAND][0][12] = 22, [0][0][2][0][RTW89_FCC][1][14] = 22, [0][0][2][0][RTW89_FCC][2][14] = 56, 
[0][0][2][0][RTW89_ETSI][1][14] = 66, @@ -40162,6 +40992,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][14] = 66, [0][0][2][0][RTW89_MKK][0][14] = 26, [0][0][2][0][RTW89_IC][1][14] = 22, + [0][0][2][0][RTW89_IC][2][14] = 56, [0][0][2][0][RTW89_KCC][1][14] = 24, [0][0][2][0][RTW89_KCC][0][14] = 24, [0][0][2][0][RTW89_ACMA][1][14] = 66, @@ -40171,6 +41002,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][14] = 28, [0][0][2][0][RTW89_UK][1][14] = 66, [0][0][2][0][RTW89_UK][0][14] = 28, + [0][0][2][0][RTW89_THAILAND][1][14] = 56, + [0][0][2][0][RTW89_THAILAND][0][14] = 22, [0][0][2][0][RTW89_FCC][1][15] = 22, [0][0][2][0][RTW89_FCC][2][15] = 56, [0][0][2][0][RTW89_ETSI][1][15] = 66, @@ -40178,6 +41011,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][15] = 66, [0][0][2][0][RTW89_MKK][0][15] = 26, [0][0][2][0][RTW89_IC][1][15] = 22, + [0][0][2][0][RTW89_IC][2][15] = 56, [0][0][2][0][RTW89_KCC][1][15] = 24, [0][0][2][0][RTW89_KCC][0][15] = 24, [0][0][2][0][RTW89_ACMA][1][15] = 66, @@ -40187,6 +41021,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][15] = 28, [0][0][2][0][RTW89_UK][1][15] = 66, [0][0][2][0][RTW89_UK][0][15] = 28, + [0][0][2][0][RTW89_THAILAND][1][15] = 56, + [0][0][2][0][RTW89_THAILAND][0][15] = 22, [0][0][2][0][RTW89_FCC][1][17] = 22, [0][0][2][0][RTW89_FCC][2][17] = 56, [0][0][2][0][RTW89_ETSI][1][17] = 66, @@ -40194,6 +41030,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][17] = 66, [0][0][2][0][RTW89_MKK][0][17] = 26, [0][0][2][0][RTW89_IC][1][17] = 22, + [0][0][2][0][RTW89_IC][2][17] = 56, [0][0][2][0][RTW89_KCC][1][17] = 24, [0][0][2][0][RTW89_KCC][0][17] = 24, [0][0][2][0][RTW89_ACMA][1][17] = 66, @@ -40203,6 +41040,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][17] = 28, [0][0][2][0][RTW89_UK][1][17] = 66, [0][0][2][0][RTW89_UK][0][17] = 28, + [0][0][2][0][RTW89_THAILAND][1][17] = 56, + [0][0][2][0][RTW89_THAILAND][0][17] = 22, [0][0][2][0][RTW89_FCC][1][19] = 22, [0][0][2][0][RTW89_FCC][2][19] = 56, [0][0][2][0][RTW89_ETSI][1][19] = 66, @@ -40210,6 +41049,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][19] = 66, [0][0][2][0][RTW89_MKK][0][19] = 26, [0][0][2][0][RTW89_IC][1][19] = 22, + [0][0][2][0][RTW89_IC][2][19] = 56, [0][0][2][0][RTW89_KCC][1][19] = 24, [0][0][2][0][RTW89_KCC][0][19] = 24, [0][0][2][0][RTW89_ACMA][1][19] = 66, @@ -40219,6 +41059,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][19] = 28, [0][0][2][0][RTW89_UK][1][19] = 66, [0][0][2][0][RTW89_UK][0][19] = 28, + [0][0][2][0][RTW89_THAILAND][1][19] = 56, + [0][0][2][0][RTW89_THAILAND][0][19] = 22, [0][0][2][0][RTW89_FCC][1][21] = 22, [0][0][2][0][RTW89_FCC][2][21] = 56, [0][0][2][0][RTW89_ETSI][1][21] = 66, @@ -40226,6 +41068,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][21] = 66, [0][0][2][0][RTW89_MKK][0][21] = 26, [0][0][2][0][RTW89_IC][1][21] = 22, + [0][0][2][0][RTW89_IC][2][21] = 56, [0][0][2][0][RTW89_KCC][1][21] = 24, [0][0][2][0][RTW89_KCC][0][21] = 24, [0][0][2][0][RTW89_ACMA][1][21] = 66, @@ -40235,6 +41078,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][21] = 28, [0][0][2][0][RTW89_UK][1][21] = 
66, [0][0][2][0][RTW89_UK][0][21] = 28, + [0][0][2][0][RTW89_THAILAND][1][21] = 56, + [0][0][2][0][RTW89_THAILAND][0][21] = 22, [0][0][2][0][RTW89_FCC][1][23] = 22, [0][0][2][0][RTW89_FCC][2][23] = 70, [0][0][2][0][RTW89_ETSI][1][23] = 66, @@ -40242,6 +41087,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][23] = 66, [0][0][2][0][RTW89_MKK][0][23] = 26, [0][0][2][0][RTW89_IC][1][23] = 22, + [0][0][2][0][RTW89_IC][2][23] = 70, [0][0][2][0][RTW89_KCC][1][23] = 24, [0][0][2][0][RTW89_KCC][0][23] = 26, [0][0][2][0][RTW89_ACMA][1][23] = 66, @@ -40251,6 +41097,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][23] = 28, [0][0][2][0][RTW89_UK][1][23] = 66, [0][0][2][0][RTW89_UK][0][23] = 28, + [0][0][2][0][RTW89_THAILAND][1][23] = 66, + [0][0][2][0][RTW89_THAILAND][0][23] = 22, [0][0][2][0][RTW89_FCC][1][25] = 22, [0][0][2][0][RTW89_FCC][2][25] = 70, [0][0][2][0][RTW89_ETSI][1][25] = 66, @@ -40258,6 +41106,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][25] = 66, [0][0][2][0][RTW89_MKK][0][25] = 26, [0][0][2][0][RTW89_IC][1][25] = 22, + [0][0][2][0][RTW89_IC][2][25] = 70, [0][0][2][0][RTW89_KCC][1][25] = 24, [0][0][2][0][RTW89_KCC][0][25] = 26, [0][0][2][0][RTW89_ACMA][1][25] = 66, @@ -40267,6 +41116,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][25] = 28, [0][0][2][0][RTW89_UK][1][25] = 66, [0][0][2][0][RTW89_UK][0][25] = 28, + [0][0][2][0][RTW89_THAILAND][1][25] = 66, + [0][0][2][0][RTW89_THAILAND][0][25] = 22, [0][0][2][0][RTW89_FCC][1][27] = 22, [0][0][2][0][RTW89_FCC][2][27] = 70, [0][0][2][0][RTW89_ETSI][1][27] = 66, @@ -40274,6 +41125,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][27] = 66, [0][0][2][0][RTW89_MKK][0][27] = 26, [0][0][2][0][RTW89_IC][1][27] = 22, + [0][0][2][0][RTW89_IC][2][27] = 70, [0][0][2][0][RTW89_KCC][1][27] = 24, [0][0][2][0][RTW89_KCC][0][27] = 26, [0][0][2][0][RTW89_ACMA][1][27] = 66, @@ -40283,6 +41135,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][27] = 28, [0][0][2][0][RTW89_UK][1][27] = 66, [0][0][2][0][RTW89_UK][0][27] = 28, + [0][0][2][0][RTW89_THAILAND][1][27] = 66, + [0][0][2][0][RTW89_THAILAND][0][27] = 22, [0][0][2][0][RTW89_FCC][1][29] = 22, [0][0][2][0][RTW89_FCC][2][29] = 70, [0][0][2][0][RTW89_ETSI][1][29] = 66, @@ -40290,6 +41144,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][29] = 66, [0][0][2][0][RTW89_MKK][0][29] = 26, [0][0][2][0][RTW89_IC][1][29] = 22, + [0][0][2][0][RTW89_IC][2][29] = 70, [0][0][2][0][RTW89_KCC][1][29] = 24, [0][0][2][0][RTW89_KCC][0][29] = 26, [0][0][2][0][RTW89_ACMA][1][29] = 66, @@ -40299,6 +41154,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][29] = 28, [0][0][2][0][RTW89_UK][1][29] = 66, [0][0][2][0][RTW89_UK][0][29] = 28, + [0][0][2][0][RTW89_THAILAND][1][29] = 66, + [0][0][2][0][RTW89_THAILAND][0][29] = 22, [0][0][2][0][RTW89_FCC][1][30] = 22, [0][0][2][0][RTW89_FCC][2][30] = 70, [0][0][2][0][RTW89_ETSI][1][30] = 66, @@ -40306,6 +41163,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][30] = 66, [0][0][2][0][RTW89_MKK][0][30] = 26, [0][0][2][0][RTW89_IC][1][30] = 22, + [0][0][2][0][RTW89_IC][2][30] = 70, [0][0][2][0][RTW89_KCC][1][30] = 24, [0][0][2][0][RTW89_KCC][0][30] = 
26, [0][0][2][0][RTW89_ACMA][1][30] = 66, @@ -40315,6 +41173,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][30] = 28, [0][0][2][0][RTW89_UK][1][30] = 66, [0][0][2][0][RTW89_UK][0][30] = 28, + [0][0][2][0][RTW89_THAILAND][1][30] = 66, + [0][0][2][0][RTW89_THAILAND][0][30] = 22, [0][0][2][0][RTW89_FCC][1][32] = 22, [0][0][2][0][RTW89_FCC][2][32] = 70, [0][0][2][0][RTW89_ETSI][1][32] = 66, @@ -40322,6 +41182,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][32] = 66, [0][0][2][0][RTW89_MKK][0][32] = 26, [0][0][2][0][RTW89_IC][1][32] = 22, + [0][0][2][0][RTW89_IC][2][32] = 70, [0][0][2][0][RTW89_KCC][1][32] = 24, [0][0][2][0][RTW89_KCC][0][32] = 26, [0][0][2][0][RTW89_ACMA][1][32] = 66, @@ -40331,6 +41192,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][32] = 28, [0][0][2][0][RTW89_UK][1][32] = 66, [0][0][2][0][RTW89_UK][0][32] = 28, + [0][0][2][0][RTW89_THAILAND][1][32] = 66, + [0][0][2][0][RTW89_THAILAND][0][32] = 22, [0][0][2][0][RTW89_FCC][1][34] = 22, [0][0][2][0][RTW89_FCC][2][34] = 70, [0][0][2][0][RTW89_ETSI][1][34] = 66, @@ -40338,6 +41201,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][34] = 66, [0][0][2][0][RTW89_MKK][0][34] = 26, [0][0][2][0][RTW89_IC][1][34] = 22, + [0][0][2][0][RTW89_IC][2][34] = 70, [0][0][2][0][RTW89_KCC][1][34] = 24, [0][0][2][0][RTW89_KCC][0][34] = 26, [0][0][2][0][RTW89_ACMA][1][34] = 66, @@ -40347,6 +41211,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][34] = 28, [0][0][2][0][RTW89_UK][1][34] = 66, [0][0][2][0][RTW89_UK][0][34] = 28, + [0][0][2][0][RTW89_THAILAND][1][34] = 66, + [0][0][2][0][RTW89_THAILAND][0][34] = 22, [0][0][2][0][RTW89_FCC][1][36] = 22, [0][0][2][0][RTW89_FCC][2][36] = 70, [0][0][2][0][RTW89_ETSI][1][36] = 66, @@ -40354,6 +41220,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][36] = 66, [0][0][2][0][RTW89_MKK][0][36] = 26, [0][0][2][0][RTW89_IC][1][36] = 22, + [0][0][2][0][RTW89_IC][2][36] = 70, [0][0][2][0][RTW89_KCC][1][36] = 24, [0][0][2][0][RTW89_KCC][0][36] = 26, [0][0][2][0][RTW89_ACMA][1][36] = 66, @@ -40363,6 +41230,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][36] = 28, [0][0][2][0][RTW89_UK][1][36] = 66, [0][0][2][0][RTW89_UK][0][36] = 28, + [0][0][2][0][RTW89_THAILAND][1][36] = 66, + [0][0][2][0][RTW89_THAILAND][0][36] = 22, [0][0][2][0][RTW89_FCC][1][38] = 22, [0][0][2][0][RTW89_FCC][2][38] = 70, [0][0][2][0][RTW89_ETSI][1][38] = 66, @@ -40370,6 +41239,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][38] = 66, [0][0][2][0][RTW89_MKK][0][38] = 26, [0][0][2][0][RTW89_IC][1][38] = 22, + [0][0][2][0][RTW89_IC][2][38] = 70, [0][0][2][0][RTW89_KCC][1][38] = 24, [0][0][2][0][RTW89_KCC][0][38] = 26, [0][0][2][0][RTW89_ACMA][1][38] = 66, @@ -40379,6 +41249,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][38] = 28, [0][0][2][0][RTW89_UK][1][38] = 66, [0][0][2][0][RTW89_UK][0][38] = 28, + [0][0][2][0][RTW89_THAILAND][1][38] = 66, + [0][0][2][0][RTW89_THAILAND][0][38] = 22, [0][0][2][0][RTW89_FCC][1][40] = 22, [0][0][2][0][RTW89_FCC][2][40] = 70, [0][0][2][0][RTW89_ETSI][1][40] = 66, @@ -40386,6 +41258,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][0][2][0][RTW89_MKK][1][40] = 66, [0][0][2][0][RTW89_MKK][0][40] = 26, [0][0][2][0][RTW89_IC][1][40] = 22, + [0][0][2][0][RTW89_IC][2][40] = 70, [0][0][2][0][RTW89_KCC][1][40] = 24, [0][0][2][0][RTW89_KCC][0][40] = 26, [0][0][2][0][RTW89_ACMA][1][40] = 66, @@ -40395,6 +41268,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][40] = 28, [0][0][2][0][RTW89_UK][1][40] = 66, [0][0][2][0][RTW89_UK][0][40] = 28, + [0][0][2][0][RTW89_THAILAND][1][40] = 66, + [0][0][2][0][RTW89_THAILAND][0][40] = 22, [0][0][2][0][RTW89_FCC][1][42] = 22, [0][0][2][0][RTW89_FCC][2][42] = 70, [0][0][2][0][RTW89_ETSI][1][42] = 66, @@ -40402,6 +41277,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][42] = 66, [0][0][2][0][RTW89_MKK][0][42] = 26, [0][0][2][0][RTW89_IC][1][42] = 22, + [0][0][2][0][RTW89_IC][2][42] = 70, [0][0][2][0][RTW89_KCC][1][42] = 24, [0][0][2][0][RTW89_KCC][0][42] = 26, [0][0][2][0][RTW89_ACMA][1][42] = 66, @@ -40411,6 +41287,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][42] = 28, [0][0][2][0][RTW89_UK][1][42] = 66, [0][0][2][0][RTW89_UK][0][42] = 28, + [0][0][2][0][RTW89_THAILAND][1][42] = 66, + [0][0][2][0][RTW89_THAILAND][0][42] = 22, [0][0][2][0][RTW89_FCC][1][44] = 22, [0][0][2][0][RTW89_FCC][2][44] = 70, [0][0][2][0][RTW89_ETSI][1][44] = 66, @@ -40418,6 +41296,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][44] = 44, [0][0][2][0][RTW89_MKK][0][44] = 28, [0][0][2][0][RTW89_IC][1][44] = 22, + [0][0][2][0][RTW89_IC][2][44] = 70, [0][0][2][0][RTW89_KCC][1][44] = 24, [0][0][2][0][RTW89_KCC][0][44] = 26, [0][0][2][0][RTW89_ACMA][1][44] = 66, @@ -40427,6 +41306,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][44] = 30, [0][0][2][0][RTW89_UK][1][44] = 66, [0][0][2][0][RTW89_UK][0][44] = 30, + [0][0][2][0][RTW89_THAILAND][1][44] = 68, + [0][0][2][0][RTW89_THAILAND][0][44] = 22, [0][0][2][0][RTW89_FCC][1][45] = 22, [0][0][2][0][RTW89_FCC][2][45] = 127, [0][0][2][0][RTW89_ETSI][1][45] = 127, @@ -40434,6 +41315,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][45] = 127, [0][0][2][0][RTW89_MKK][0][45] = 127, [0][0][2][0][RTW89_IC][1][45] = 22, + [0][0][2][0][RTW89_IC][2][45] = 70, [0][0][2][0][RTW89_KCC][1][45] = 24, [0][0][2][0][RTW89_KCC][0][45] = 127, [0][0][2][0][RTW89_ACMA][1][45] = 127, @@ -40443,6 +41325,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][45] = 127, [0][0][2][0][RTW89_UK][1][45] = 127, [0][0][2][0][RTW89_UK][0][45] = 127, + [0][0][2][0][RTW89_THAILAND][1][45] = 127, + [0][0][2][0][RTW89_THAILAND][0][45] = 127, [0][0][2][0][RTW89_FCC][1][47] = 22, [0][0][2][0][RTW89_FCC][2][47] = 127, [0][0][2][0][RTW89_ETSI][1][47] = 127, @@ -40450,6 +41334,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][47] = 127, [0][0][2][0][RTW89_MKK][0][47] = 127, [0][0][2][0][RTW89_IC][1][47] = 22, + [0][0][2][0][RTW89_IC][2][47] = 70, [0][0][2][0][RTW89_KCC][1][47] = 24, [0][0][2][0][RTW89_KCC][0][47] = 127, [0][0][2][0][RTW89_ACMA][1][47] = 127, @@ -40459,6 +41344,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][47] = 127, [0][0][2][0][RTW89_UK][1][47] = 127, [0][0][2][0][RTW89_UK][0][47] = 127, + [0][0][2][0][RTW89_THAILAND][1][47] = 127, + 
[0][0][2][0][RTW89_THAILAND][0][47] = 127, [0][0][2][0][RTW89_FCC][1][49] = 24, [0][0][2][0][RTW89_FCC][2][49] = 127, [0][0][2][0][RTW89_ETSI][1][49] = 127, @@ -40466,6 +41353,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][49] = 127, [0][0][2][0][RTW89_MKK][0][49] = 127, [0][0][2][0][RTW89_IC][1][49] = 24, + [0][0][2][0][RTW89_IC][2][49] = 70, [0][0][2][0][RTW89_KCC][1][49] = 24, [0][0][2][0][RTW89_KCC][0][49] = 127, [0][0][2][0][RTW89_ACMA][1][49] = 127, @@ -40475,6 +41363,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][49] = 127, [0][0][2][0][RTW89_UK][1][49] = 127, [0][0][2][0][RTW89_UK][0][49] = 127, + [0][0][2][0][RTW89_THAILAND][1][49] = 127, + [0][0][2][0][RTW89_THAILAND][0][49] = 127, [0][0][2][0][RTW89_FCC][1][51] = 22, [0][0][2][0][RTW89_FCC][2][51] = 127, [0][0][2][0][RTW89_ETSI][1][51] = 127, @@ -40482,6 +41372,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][51] = 127, [0][0][2][0][RTW89_MKK][0][51] = 127, [0][0][2][0][RTW89_IC][1][51] = 22, + [0][0][2][0][RTW89_IC][2][51] = 70, [0][0][2][0][RTW89_KCC][1][51] = 24, [0][0][2][0][RTW89_KCC][0][51] = 127, [0][0][2][0][RTW89_ACMA][1][51] = 127, @@ -40491,6 +41382,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][51] = 127, [0][0][2][0][RTW89_UK][1][51] = 127, [0][0][2][0][RTW89_UK][0][51] = 127, + [0][0][2][0][RTW89_THAILAND][1][51] = 127, + [0][0][2][0][RTW89_THAILAND][0][51] = 127, [0][0][2][0][RTW89_FCC][1][53] = 22, [0][0][2][0][RTW89_FCC][2][53] = 127, [0][0][2][0][RTW89_ETSI][1][53] = 127, @@ -40498,6 +41391,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][53] = 127, [0][0][2][0][RTW89_MKK][0][53] = 127, [0][0][2][0][RTW89_IC][1][53] = 22, + [0][0][2][0][RTW89_IC][2][53] = 70, [0][0][2][0][RTW89_KCC][1][53] = 24, [0][0][2][0][RTW89_KCC][0][53] = 127, [0][0][2][0][RTW89_ACMA][1][53] = 127, @@ -40507,6 +41401,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][53] = 127, [0][0][2][0][RTW89_UK][1][53] = 127, [0][0][2][0][RTW89_UK][0][53] = 127, + [0][0][2][0][RTW89_THAILAND][1][53] = 127, + [0][0][2][0][RTW89_THAILAND][0][53] = 127, [0][0][2][0][RTW89_FCC][1][55] = 22, [0][0][2][0][RTW89_FCC][2][55] = 68, [0][0][2][0][RTW89_ETSI][1][55] = 127, @@ -40514,6 +41410,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][55] = 127, [0][0][2][0][RTW89_MKK][0][55] = 127, [0][0][2][0][RTW89_IC][1][55] = 22, + [0][0][2][0][RTW89_IC][2][55] = 68, [0][0][2][0][RTW89_KCC][1][55] = 26, [0][0][2][0][RTW89_KCC][0][55] = 127, [0][0][2][0][RTW89_ACMA][1][55] = 127, @@ -40523,6 +41420,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][55] = 127, [0][0][2][0][RTW89_UK][1][55] = 127, [0][0][2][0][RTW89_UK][0][55] = 127, + [0][0][2][0][RTW89_THAILAND][1][55] = 127, + [0][0][2][0][RTW89_THAILAND][0][55] = 127, [0][0][2][0][RTW89_FCC][1][57] = 22, [0][0][2][0][RTW89_FCC][2][57] = 68, [0][0][2][0][RTW89_ETSI][1][57] = 127, @@ -40530,6 +41429,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][57] = 127, [0][0][2][0][RTW89_MKK][0][57] = 127, [0][0][2][0][RTW89_IC][1][57] = 22, + [0][0][2][0][RTW89_IC][2][57] = 68, [0][0][2][0][RTW89_KCC][1][57] = 26, [0][0][2][0][RTW89_KCC][0][57] = 127, [0][0][2][0][RTW89_ACMA][1][57] = 
127, @@ -40539,6 +41439,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][57] = 127, [0][0][2][0][RTW89_UK][1][57] = 127, [0][0][2][0][RTW89_UK][0][57] = 127, + [0][0][2][0][RTW89_THAILAND][1][57] = 127, + [0][0][2][0][RTW89_THAILAND][0][57] = 127, [0][0][2][0][RTW89_FCC][1][59] = 22, [0][0][2][0][RTW89_FCC][2][59] = 68, [0][0][2][0][RTW89_ETSI][1][59] = 127, @@ -40546,6 +41448,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][59] = 127, [0][0][2][0][RTW89_MKK][0][59] = 127, [0][0][2][0][RTW89_IC][1][59] = 22, + [0][0][2][0][RTW89_IC][2][59] = 68, [0][0][2][0][RTW89_KCC][1][59] = 26, [0][0][2][0][RTW89_KCC][0][59] = 127, [0][0][2][0][RTW89_ACMA][1][59] = 127, @@ -40555,6 +41458,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][59] = 127, [0][0][2][0][RTW89_UK][1][59] = 127, [0][0][2][0][RTW89_UK][0][59] = 127, + [0][0][2][0][RTW89_THAILAND][1][59] = 127, + [0][0][2][0][RTW89_THAILAND][0][59] = 127, [0][0][2][0][RTW89_FCC][1][60] = 22, [0][0][2][0][RTW89_FCC][2][60] = 68, [0][0][2][0][RTW89_ETSI][1][60] = 127, @@ -40562,6 +41467,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][60] = 127, [0][0][2][0][RTW89_MKK][0][60] = 127, [0][0][2][0][RTW89_IC][1][60] = 22, + [0][0][2][0][RTW89_IC][2][60] = 68, [0][0][2][0][RTW89_KCC][1][60] = 26, [0][0][2][0][RTW89_KCC][0][60] = 127, [0][0][2][0][RTW89_ACMA][1][60] = 127, @@ -40571,6 +41477,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][60] = 127, [0][0][2][0][RTW89_UK][1][60] = 127, [0][0][2][0][RTW89_UK][0][60] = 127, + [0][0][2][0][RTW89_THAILAND][1][60] = 127, + [0][0][2][0][RTW89_THAILAND][0][60] = 127, [0][0][2][0][RTW89_FCC][1][62] = 22, [0][0][2][0][RTW89_FCC][2][62] = 68, [0][0][2][0][RTW89_ETSI][1][62] = 127, @@ -40578,6 +41486,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][62] = 127, [0][0][2][0][RTW89_MKK][0][62] = 127, [0][0][2][0][RTW89_IC][1][62] = 22, + [0][0][2][0][RTW89_IC][2][62] = 68, [0][0][2][0][RTW89_KCC][1][62] = 26, [0][0][2][0][RTW89_KCC][0][62] = 127, [0][0][2][0][RTW89_ACMA][1][62] = 127, @@ -40587,6 +41496,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][62] = 127, [0][0][2][0][RTW89_UK][1][62] = 127, [0][0][2][0][RTW89_UK][0][62] = 127, + [0][0][2][0][RTW89_THAILAND][1][62] = 127, + [0][0][2][0][RTW89_THAILAND][0][62] = 127, [0][0][2][0][RTW89_FCC][1][64] = 22, [0][0][2][0][RTW89_FCC][2][64] = 68, [0][0][2][0][RTW89_ETSI][1][64] = 127, @@ -40594,6 +41505,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][64] = 127, [0][0][2][0][RTW89_MKK][0][64] = 127, [0][0][2][0][RTW89_IC][1][64] = 22, + [0][0][2][0][RTW89_IC][2][64] = 68, [0][0][2][0][RTW89_KCC][1][64] = 26, [0][0][2][0][RTW89_KCC][0][64] = 127, [0][0][2][0][RTW89_ACMA][1][64] = 127, @@ -40603,6 +41515,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][64] = 127, [0][0][2][0][RTW89_UK][1][64] = 127, [0][0][2][0][RTW89_UK][0][64] = 127, + [0][0][2][0][RTW89_THAILAND][1][64] = 127, + [0][0][2][0][RTW89_THAILAND][0][64] = 127, [0][0][2][0][RTW89_FCC][1][66] = 22, [0][0][2][0][RTW89_FCC][2][66] = 68, [0][0][2][0][RTW89_ETSI][1][66] = 127, @@ -40610,6 +41524,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][0][2][0][RTW89_MKK][1][66] = 127, [0][0][2][0][RTW89_MKK][0][66] = 127, [0][0][2][0][RTW89_IC][1][66] = 22, + [0][0][2][0][RTW89_IC][2][66] = 68, [0][0][2][0][RTW89_KCC][1][66] = 26, [0][0][2][0][RTW89_KCC][0][66] = 127, [0][0][2][0][RTW89_ACMA][1][66] = 127, @@ -40619,6 +41534,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][66] = 127, [0][0][2][0][RTW89_UK][1][66] = 127, [0][0][2][0][RTW89_UK][0][66] = 127, + [0][0][2][0][RTW89_THAILAND][1][66] = 127, + [0][0][2][0][RTW89_THAILAND][0][66] = 127, [0][0][2][0][RTW89_FCC][1][68] = 22, [0][0][2][0][RTW89_FCC][2][68] = 68, [0][0][2][0][RTW89_ETSI][1][68] = 127, @@ -40626,6 +41543,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][68] = 127, [0][0][2][0][RTW89_MKK][0][68] = 127, [0][0][2][0][RTW89_IC][1][68] = 22, + [0][0][2][0][RTW89_IC][2][68] = 68, [0][0][2][0][RTW89_KCC][1][68] = 26, [0][0][2][0][RTW89_KCC][0][68] = 127, [0][0][2][0][RTW89_ACMA][1][68] = 127, @@ -40635,6 +41553,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][68] = 127, [0][0][2][0][RTW89_UK][1][68] = 127, [0][0][2][0][RTW89_UK][0][68] = 127, + [0][0][2][0][RTW89_THAILAND][1][68] = 127, + [0][0][2][0][RTW89_THAILAND][0][68] = 127, [0][0][2][0][RTW89_FCC][1][70] = 24, [0][0][2][0][RTW89_FCC][2][70] = 68, [0][0][2][0][RTW89_ETSI][1][70] = 127, @@ -40642,6 +41562,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][70] = 127, [0][0][2][0][RTW89_MKK][0][70] = 127, [0][0][2][0][RTW89_IC][1][70] = 24, + [0][0][2][0][RTW89_IC][2][70] = 68, [0][0][2][0][RTW89_KCC][1][70] = 26, [0][0][2][0][RTW89_KCC][0][70] = 127, [0][0][2][0][RTW89_ACMA][1][70] = 127, @@ -40651,6 +41572,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][70] = 127, [0][0][2][0][RTW89_UK][1][70] = 127, [0][0][2][0][RTW89_UK][0][70] = 127, + [0][0][2][0][RTW89_THAILAND][1][70] = 127, + [0][0][2][0][RTW89_THAILAND][0][70] = 127, [0][0][2][0][RTW89_FCC][1][72] = 22, [0][0][2][0][RTW89_FCC][2][72] = 68, [0][0][2][0][RTW89_ETSI][1][72] = 127, @@ -40658,6 +41581,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][72] = 127, [0][0][2][0][RTW89_MKK][0][72] = 127, [0][0][2][0][RTW89_IC][1][72] = 22, + [0][0][2][0][RTW89_IC][2][72] = 68, [0][0][2][0][RTW89_KCC][1][72] = 26, [0][0][2][0][RTW89_KCC][0][72] = 127, [0][0][2][0][RTW89_ACMA][1][72] = 127, @@ -40667,6 +41591,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][72] = 127, [0][0][2][0][RTW89_UK][1][72] = 127, [0][0][2][0][RTW89_UK][0][72] = 127, + [0][0][2][0][RTW89_THAILAND][1][72] = 127, + [0][0][2][0][RTW89_THAILAND][0][72] = 127, [0][0][2][0][RTW89_FCC][1][74] = 22, [0][0][2][0][RTW89_FCC][2][74] = 68, [0][0][2][0][RTW89_ETSI][1][74] = 127, @@ -40674,6 +41600,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][74] = 127, [0][0][2][0][RTW89_MKK][0][74] = 127, [0][0][2][0][RTW89_IC][1][74] = 22, + [0][0][2][0][RTW89_IC][2][74] = 68, [0][0][2][0][RTW89_KCC][1][74] = 26, [0][0][2][0][RTW89_KCC][0][74] = 127, [0][0][2][0][RTW89_ACMA][1][74] = 127, @@ -40683,6 +41610,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][74] = 127, [0][0][2][0][RTW89_UK][1][74] = 127, [0][0][2][0][RTW89_UK][0][74] = 127, + [0][0][2][0][RTW89_THAILAND][1][74] = 
127, + [0][0][2][0][RTW89_THAILAND][0][74] = 127, [0][0][2][0][RTW89_FCC][1][75] = 22, [0][0][2][0][RTW89_FCC][2][75] = 68, [0][0][2][0][RTW89_ETSI][1][75] = 127, @@ -40690,6 +41619,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][75] = 127, [0][0][2][0][RTW89_MKK][0][75] = 127, [0][0][2][0][RTW89_IC][1][75] = 22, + [0][0][2][0][RTW89_IC][2][75] = 68, [0][0][2][0][RTW89_KCC][1][75] = 26, [0][0][2][0][RTW89_KCC][0][75] = 127, [0][0][2][0][RTW89_ACMA][1][75] = 127, @@ -40699,6 +41629,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][75] = 127, [0][0][2][0][RTW89_UK][1][75] = 127, [0][0][2][0][RTW89_UK][0][75] = 127, + [0][0][2][0][RTW89_THAILAND][1][75] = 127, + [0][0][2][0][RTW89_THAILAND][0][75] = 127, [0][0][2][0][RTW89_FCC][1][77] = 22, [0][0][2][0][RTW89_FCC][2][77] = 68, [0][0][2][0][RTW89_ETSI][1][77] = 127, @@ -40706,6 +41638,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][77] = 127, [0][0][2][0][RTW89_MKK][0][77] = 127, [0][0][2][0][RTW89_IC][1][77] = 22, + [0][0][2][0][RTW89_IC][2][77] = 68, [0][0][2][0][RTW89_KCC][1][77] = 26, [0][0][2][0][RTW89_KCC][0][77] = 127, [0][0][2][0][RTW89_ACMA][1][77] = 127, @@ -40715,6 +41648,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][77] = 127, [0][0][2][0][RTW89_UK][1][77] = 127, [0][0][2][0][RTW89_UK][0][77] = 127, + [0][0][2][0][RTW89_THAILAND][1][77] = 127, + [0][0][2][0][RTW89_THAILAND][0][77] = 127, [0][0][2][0][RTW89_FCC][1][79] = 22, [0][0][2][0][RTW89_FCC][2][79] = 68, [0][0][2][0][RTW89_ETSI][1][79] = 127, @@ -40722,6 +41657,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][79] = 127, [0][0][2][0][RTW89_MKK][0][79] = 127, [0][0][2][0][RTW89_IC][1][79] = 22, + [0][0][2][0][RTW89_IC][2][79] = 68, [0][0][2][0][RTW89_KCC][1][79] = 26, [0][0][2][0][RTW89_KCC][0][79] = 127, [0][0][2][0][RTW89_ACMA][1][79] = 127, @@ -40731,6 +41667,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][79] = 127, [0][0][2][0][RTW89_UK][1][79] = 127, [0][0][2][0][RTW89_UK][0][79] = 127, + [0][0][2][0][RTW89_THAILAND][1][79] = 127, + [0][0][2][0][RTW89_THAILAND][0][79] = 127, [0][0][2][0][RTW89_FCC][1][81] = 22, [0][0][2][0][RTW89_FCC][2][81] = 68, [0][0][2][0][RTW89_ETSI][1][81] = 127, @@ -40738,6 +41676,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][81] = 127, [0][0][2][0][RTW89_MKK][0][81] = 127, [0][0][2][0][RTW89_IC][1][81] = 22, + [0][0][2][0][RTW89_IC][2][81] = 68, [0][0][2][0][RTW89_KCC][1][81] = 26, [0][0][2][0][RTW89_KCC][0][81] = 127, [0][0][2][0][RTW89_ACMA][1][81] = 127, @@ -40747,6 +41686,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][81] = 127, [0][0][2][0][RTW89_UK][1][81] = 127, [0][0][2][0][RTW89_UK][0][81] = 127, + [0][0][2][0][RTW89_THAILAND][1][81] = 127, + [0][0][2][0][RTW89_THAILAND][0][81] = 127, [0][0][2][0][RTW89_FCC][1][83] = 22, [0][0][2][0][RTW89_FCC][2][83] = 68, [0][0][2][0][RTW89_ETSI][1][83] = 127, @@ -40754,6 +41695,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][83] = 127, [0][0][2][0][RTW89_MKK][0][83] = 127, [0][0][2][0][RTW89_IC][1][83] = 22, + [0][0][2][0][RTW89_IC][2][83] = 68, [0][0][2][0][RTW89_KCC][1][83] = 32, [0][0][2][0][RTW89_KCC][0][83] = 127, [0][0][2][0][RTW89_ACMA][1][83] 
= 127, @@ -40763,6 +41705,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][83] = 127, [0][0][2][0][RTW89_UK][1][83] = 127, [0][0][2][0][RTW89_UK][0][83] = 127, + [0][0][2][0][RTW89_THAILAND][1][83] = 127, + [0][0][2][0][RTW89_THAILAND][0][83] = 127, [0][0][2][0][RTW89_FCC][1][85] = 22, [0][0][2][0][RTW89_FCC][2][85] = 68, [0][0][2][0][RTW89_ETSI][1][85] = 127, @@ -40770,6 +41714,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][85] = 127, [0][0][2][0][RTW89_MKK][0][85] = 127, [0][0][2][0][RTW89_IC][1][85] = 22, + [0][0][2][0][RTW89_IC][2][85] = 68, [0][0][2][0][RTW89_KCC][1][85] = 32, [0][0][2][0][RTW89_KCC][0][85] = 127, [0][0][2][0][RTW89_ACMA][1][85] = 127, @@ -40779,6 +41724,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][85] = 127, [0][0][2][0][RTW89_UK][1][85] = 127, [0][0][2][0][RTW89_UK][0][85] = 127, + [0][0][2][0][RTW89_THAILAND][1][85] = 127, + [0][0][2][0][RTW89_THAILAND][0][85] = 127, [0][0][2][0][RTW89_FCC][1][87] = 22, [0][0][2][0][RTW89_FCC][2][87] = 127, [0][0][2][0][RTW89_ETSI][1][87] = 127, @@ -40786,6 +41733,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][87] = 127, [0][0][2][0][RTW89_MKK][0][87] = 127, [0][0][2][0][RTW89_IC][1][87] = 22, + [0][0][2][0][RTW89_IC][2][87] = 127, [0][0][2][0][RTW89_KCC][1][87] = 32, [0][0][2][0][RTW89_KCC][0][87] = 127, [0][0][2][0][RTW89_ACMA][1][87] = 127, @@ -40795,6 +41743,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][87] = 127, [0][0][2][0][RTW89_UK][1][87] = 127, [0][0][2][0][RTW89_UK][0][87] = 127, + [0][0][2][0][RTW89_THAILAND][1][87] = 127, + [0][0][2][0][RTW89_THAILAND][0][87] = 127, [0][0][2][0][RTW89_FCC][1][89] = 22, [0][0][2][0][RTW89_FCC][2][89] = 127, [0][0][2][0][RTW89_ETSI][1][89] = 127, @@ -40802,6 +41752,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][89] = 127, [0][0][2][0][RTW89_MKK][0][89] = 127, [0][0][2][0][RTW89_IC][1][89] = 22, + [0][0][2][0][RTW89_IC][2][89] = 127, [0][0][2][0][RTW89_KCC][1][89] = 32, [0][0][2][0][RTW89_KCC][0][89] = 127, [0][0][2][0][RTW89_ACMA][1][89] = 127, @@ -40811,6 +41762,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][89] = 127, [0][0][2][0][RTW89_UK][1][89] = 127, [0][0][2][0][RTW89_UK][0][89] = 127, + [0][0][2][0][RTW89_THAILAND][1][89] = 127, + [0][0][2][0][RTW89_THAILAND][0][89] = 127, [0][0][2][0][RTW89_FCC][1][90] = 22, [0][0][2][0][RTW89_FCC][2][90] = 127, [0][0][2][0][RTW89_ETSI][1][90] = 127, @@ -40818,6 +41771,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][90] = 127, [0][0][2][0][RTW89_MKK][0][90] = 127, [0][0][2][0][RTW89_IC][1][90] = 22, + [0][0][2][0][RTW89_IC][2][90] = 127, [0][0][2][0][RTW89_KCC][1][90] = 32, [0][0][2][0][RTW89_KCC][0][90] = 127, [0][0][2][0][RTW89_ACMA][1][90] = 127, @@ -40827,6 +41781,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][90] = 127, [0][0][2][0][RTW89_UK][1][90] = 127, [0][0][2][0][RTW89_UK][0][90] = 127, + [0][0][2][0][RTW89_THAILAND][1][90] = 127, + [0][0][2][0][RTW89_THAILAND][0][90] = 127, [0][0][2][0][RTW89_FCC][1][92] = 22, [0][0][2][0][RTW89_FCC][2][92] = 127, [0][0][2][0][RTW89_ETSI][1][92] = 127, @@ -40834,6 +41790,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][0][2][0][RTW89_MKK][1][92] = 127, [0][0][2][0][RTW89_MKK][0][92] = 127, [0][0][2][0][RTW89_IC][1][92] = 22, + [0][0][2][0][RTW89_IC][2][92] = 127, [0][0][2][0][RTW89_KCC][1][92] = 32, [0][0][2][0][RTW89_KCC][0][92] = 127, [0][0][2][0][RTW89_ACMA][1][92] = 127, @@ -40843,6 +41800,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][92] = 127, [0][0][2][0][RTW89_UK][1][92] = 127, [0][0][2][0][RTW89_UK][0][92] = 127, + [0][0][2][0][RTW89_THAILAND][1][92] = 127, + [0][0][2][0][RTW89_THAILAND][0][92] = 127, [0][0][2][0][RTW89_FCC][1][94] = 22, [0][0][2][0][RTW89_FCC][2][94] = 127, [0][0][2][0][RTW89_ETSI][1][94] = 127, @@ -40850,6 +41809,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][94] = 127, [0][0][2][0][RTW89_MKK][0][94] = 127, [0][0][2][0][RTW89_IC][1][94] = 22, + [0][0][2][0][RTW89_IC][2][94] = 127, [0][0][2][0][RTW89_KCC][1][94] = 32, [0][0][2][0][RTW89_KCC][0][94] = 127, [0][0][2][0][RTW89_ACMA][1][94] = 127, @@ -40859,6 +41819,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][94] = 127, [0][0][2][0][RTW89_UK][1][94] = 127, [0][0][2][0][RTW89_UK][0][94] = 127, + [0][0][2][0][RTW89_THAILAND][1][94] = 127, + [0][0][2][0][RTW89_THAILAND][0][94] = 127, [0][0][2][0][RTW89_FCC][1][96] = 22, [0][0][2][0][RTW89_FCC][2][96] = 127, [0][0][2][0][RTW89_ETSI][1][96] = 127, @@ -40866,6 +41828,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][96] = 127, [0][0][2][0][RTW89_MKK][0][96] = 127, [0][0][2][0][RTW89_IC][1][96] = 22, + [0][0][2][0][RTW89_IC][2][96] = 127, [0][0][2][0][RTW89_KCC][1][96] = 32, [0][0][2][0][RTW89_KCC][0][96] = 127, [0][0][2][0][RTW89_ACMA][1][96] = 127, @@ -40875,6 +41838,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][96] = 127, [0][0][2][0][RTW89_UK][1][96] = 127, [0][0][2][0][RTW89_UK][0][96] = 127, + [0][0][2][0][RTW89_THAILAND][1][96] = 127, + [0][0][2][0][RTW89_THAILAND][0][96] = 127, [0][0][2][0][RTW89_FCC][1][98] = 22, [0][0][2][0][RTW89_FCC][2][98] = 127, [0][0][2][0][RTW89_ETSI][1][98] = 127, @@ -40882,6 +41847,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][98] = 127, [0][0][2][0][RTW89_MKK][0][98] = 127, [0][0][2][0][RTW89_IC][1][98] = 22, + [0][0][2][0][RTW89_IC][2][98] = 127, [0][0][2][0][RTW89_KCC][1][98] = 32, [0][0][2][0][RTW89_KCC][0][98] = 127, [0][0][2][0][RTW89_ACMA][1][98] = 127, @@ -40891,6 +41857,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][98] = 127, [0][0][2][0][RTW89_UK][1][98] = 127, [0][0][2][0][RTW89_UK][0][98] = 127, + [0][0][2][0][RTW89_THAILAND][1][98] = 127, + [0][0][2][0][RTW89_THAILAND][0][98] = 127, [0][0][2][0][RTW89_FCC][1][100] = 22, [0][0][2][0][RTW89_FCC][2][100] = 127, [0][0][2][0][RTW89_ETSI][1][100] = 127, @@ -40898,6 +41866,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][100] = 127, [0][0][2][0][RTW89_MKK][0][100] = 127, [0][0][2][0][RTW89_IC][1][100] = 22, + [0][0][2][0][RTW89_IC][2][100] = 127, [0][0][2][0][RTW89_KCC][1][100] = 32, [0][0][2][0][RTW89_KCC][0][100] = 127, [0][0][2][0][RTW89_ACMA][1][100] = 127, @@ -40907,6 +41876,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][100] = 127, [0][0][2][0][RTW89_UK][1][100] = 127, [0][0][2][0][RTW89_UK][0][100] = 127, + 
[0][0][2][0][RTW89_THAILAND][1][100] = 127, + [0][0][2][0][RTW89_THAILAND][0][100] = 127, [0][0][2][0][RTW89_FCC][1][102] = 22, [0][0][2][0][RTW89_FCC][2][102] = 127, [0][0][2][0][RTW89_ETSI][1][102] = 127, @@ -40914,6 +41885,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][102] = 127, [0][0][2][0][RTW89_MKK][0][102] = 127, [0][0][2][0][RTW89_IC][1][102] = 22, + [0][0][2][0][RTW89_IC][2][102] = 127, [0][0][2][0][RTW89_KCC][1][102] = 32, [0][0][2][0][RTW89_KCC][0][102] = 127, [0][0][2][0][RTW89_ACMA][1][102] = 127, @@ -40923,6 +41895,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][102] = 127, [0][0][2][0][RTW89_UK][1][102] = 127, [0][0][2][0][RTW89_UK][0][102] = 127, + [0][0][2][0][RTW89_THAILAND][1][102] = 127, + [0][0][2][0][RTW89_THAILAND][0][102] = 127, [0][0][2][0][RTW89_FCC][1][104] = 22, [0][0][2][0][RTW89_FCC][2][104] = 127, [0][0][2][0][RTW89_ETSI][1][104] = 127, @@ -40930,6 +41904,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][104] = 127, [0][0][2][0][RTW89_MKK][0][104] = 127, [0][0][2][0][RTW89_IC][1][104] = 22, + [0][0][2][0][RTW89_IC][2][104] = 127, [0][0][2][0][RTW89_KCC][1][104] = 32, [0][0][2][0][RTW89_KCC][0][104] = 127, [0][0][2][0][RTW89_ACMA][1][104] = 127, @@ -40939,6 +41914,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][104] = 127, [0][0][2][0][RTW89_UK][1][104] = 127, [0][0][2][0][RTW89_UK][0][104] = 127, + [0][0][2][0][RTW89_THAILAND][1][104] = 127, + [0][0][2][0][RTW89_THAILAND][0][104] = 127, [0][0][2][0][RTW89_FCC][1][105] = 22, [0][0][2][0][RTW89_FCC][2][105] = 127, [0][0][2][0][RTW89_ETSI][1][105] = 127, @@ -40946,6 +41923,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][105] = 127, [0][0][2][0][RTW89_MKK][0][105] = 127, [0][0][2][0][RTW89_IC][1][105] = 22, + [0][0][2][0][RTW89_IC][2][105] = 127, [0][0][2][0][RTW89_KCC][1][105] = 32, [0][0][2][0][RTW89_KCC][0][105] = 127, [0][0][2][0][RTW89_ACMA][1][105] = 127, @@ -40955,6 +41933,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][105] = 127, [0][0][2][0][RTW89_UK][1][105] = 127, [0][0][2][0][RTW89_UK][0][105] = 127, + [0][0][2][0][RTW89_THAILAND][1][105] = 127, + [0][0][2][0][RTW89_THAILAND][0][105] = 127, [0][0][2][0][RTW89_FCC][1][107] = 24, [0][0][2][0][RTW89_FCC][2][107] = 127, [0][0][2][0][RTW89_ETSI][1][107] = 127, @@ -40962,6 +41942,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][107] = 127, [0][0][2][0][RTW89_MKK][0][107] = 127, [0][0][2][0][RTW89_IC][1][107] = 24, + [0][0][2][0][RTW89_IC][2][107] = 127, [0][0][2][0][RTW89_KCC][1][107] = 32, [0][0][2][0][RTW89_KCC][0][107] = 127, [0][0][2][0][RTW89_ACMA][1][107] = 127, @@ -40971,6 +41952,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][107] = 127, [0][0][2][0][RTW89_UK][1][107] = 127, [0][0][2][0][RTW89_UK][0][107] = 127, + [0][0][2][0][RTW89_THAILAND][1][107] = 127, + [0][0][2][0][RTW89_THAILAND][0][107] = 127, [0][0][2][0][RTW89_FCC][1][109] = 24, [0][0][2][0][RTW89_FCC][2][109] = 127, [0][0][2][0][RTW89_ETSI][1][109] = 127, @@ -40978,6 +41961,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][109] = 127, [0][0][2][0][RTW89_MKK][0][109] = 127, [0][0][2][0][RTW89_IC][1][109] = 24, + 
[0][0][2][0][RTW89_IC][2][109] = 127, [0][0][2][0][RTW89_KCC][1][109] = 32, [0][0][2][0][RTW89_KCC][0][109] = 127, [0][0][2][0][RTW89_ACMA][1][109] = 127, @@ -40987,6 +41971,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][109] = 127, [0][0][2][0][RTW89_UK][1][109] = 127, [0][0][2][0][RTW89_UK][0][109] = 127, + [0][0][2][0][RTW89_THAILAND][1][109] = 127, + [0][0][2][0][RTW89_THAILAND][0][109] = 127, [0][0][2][0][RTW89_FCC][1][111] = 127, [0][0][2][0][RTW89_FCC][2][111] = 127, [0][0][2][0][RTW89_ETSI][1][111] = 127, @@ -40994,6 +41980,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][111] = 127, [0][0][2][0][RTW89_MKK][0][111] = 127, [0][0][2][0][RTW89_IC][1][111] = 127, + [0][0][2][0][RTW89_IC][2][111] = 127, [0][0][2][0][RTW89_KCC][1][111] = 127, [0][0][2][0][RTW89_KCC][0][111] = 127, [0][0][2][0][RTW89_ACMA][1][111] = 127, @@ -41003,6 +41990,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][111] = 127, [0][0][2][0][RTW89_UK][1][111] = 127, [0][0][2][0][RTW89_UK][0][111] = 127, + [0][0][2][0][RTW89_THAILAND][1][111] = 127, + [0][0][2][0][RTW89_THAILAND][0][111] = 127, [0][0][2][0][RTW89_FCC][1][113] = 127, [0][0][2][0][RTW89_FCC][2][113] = 127, [0][0][2][0][RTW89_ETSI][1][113] = 127, @@ -41010,6 +41999,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][113] = 127, [0][0][2][0][RTW89_MKK][0][113] = 127, [0][0][2][0][RTW89_IC][1][113] = 127, + [0][0][2][0][RTW89_IC][2][113] = 127, [0][0][2][0][RTW89_KCC][1][113] = 127, [0][0][2][0][RTW89_KCC][0][113] = 127, [0][0][2][0][RTW89_ACMA][1][113] = 127, @@ -41019,6 +42009,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][113] = 127, [0][0][2][0][RTW89_UK][1][113] = 127, [0][0][2][0][RTW89_UK][0][113] = 127, + [0][0][2][0][RTW89_THAILAND][1][113] = 127, + [0][0][2][0][RTW89_THAILAND][0][113] = 127, [0][0][2][0][RTW89_FCC][1][115] = 127, [0][0][2][0][RTW89_FCC][2][115] = 127, [0][0][2][0][RTW89_ETSI][1][115] = 127, @@ -41026,6 +42018,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][115] = 127, [0][0][2][0][RTW89_MKK][0][115] = 127, [0][0][2][0][RTW89_IC][1][115] = 127, + [0][0][2][0][RTW89_IC][2][115] = 127, [0][0][2][0][RTW89_KCC][1][115] = 127, [0][0][2][0][RTW89_KCC][0][115] = 127, [0][0][2][0][RTW89_ACMA][1][115] = 127, @@ -41035,6 +42028,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][115] = 127, [0][0][2][0][RTW89_UK][1][115] = 127, [0][0][2][0][RTW89_UK][0][115] = 127, + [0][0][2][0][RTW89_THAILAND][1][115] = 127, + [0][0][2][0][RTW89_THAILAND][0][115] = 127, [0][0][2][0][RTW89_FCC][1][117] = 127, [0][0][2][0][RTW89_FCC][2][117] = 127, [0][0][2][0][RTW89_ETSI][1][117] = 127, @@ -41042,6 +42037,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][117] = 127, [0][0][2][0][RTW89_MKK][0][117] = 127, [0][0][2][0][RTW89_IC][1][117] = 127, + [0][0][2][0][RTW89_IC][2][117] = 127, [0][0][2][0][RTW89_KCC][1][117] = 127, [0][0][2][0][RTW89_KCC][0][117] = 127, [0][0][2][0][RTW89_ACMA][1][117] = 127, @@ -41051,6 +42047,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][117] = 127, [0][0][2][0][RTW89_UK][1][117] = 127, [0][0][2][0][RTW89_UK][0][117] = 127, + [0][0][2][0][RTW89_THAILAND][1][117] = 127, + 
[0][0][2][0][RTW89_THAILAND][0][117] = 127, [0][0][2][0][RTW89_FCC][1][119] = 127, [0][0][2][0][RTW89_FCC][2][119] = 127, [0][0][2][0][RTW89_ETSI][1][119] = 127, @@ -41058,6 +42056,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][119] = 127, [0][0][2][0][RTW89_MKK][0][119] = 127, [0][0][2][0][RTW89_IC][1][119] = 127, + [0][0][2][0][RTW89_IC][2][119] = 127, [0][0][2][0][RTW89_KCC][1][119] = 127, [0][0][2][0][RTW89_KCC][0][119] = 127, [0][0][2][0][RTW89_ACMA][1][119] = 127, @@ -41067,6 +42066,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][119] = 127, [0][0][2][0][RTW89_UK][1][119] = 127, [0][0][2][0][RTW89_UK][0][119] = 127, + [0][0][2][0][RTW89_THAILAND][1][119] = 127, + [0][0][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][0][RTW89_FCC][1][0] = -2, [0][1][2][0][RTW89_FCC][2][0] = 54, [0][1][2][0][RTW89_ETSI][1][0] = 54, @@ -41074,6 +42075,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][0] = 56, [0][1][2][0][RTW89_MKK][0][0] = 16, [0][1][2][0][RTW89_IC][1][0] = -2, + [0][1][2][0][RTW89_IC][2][0] = 54, [0][1][2][0][RTW89_KCC][1][0] = 12, [0][1][2][0][RTW89_KCC][0][0] = 10, [0][1][2][0][RTW89_ACMA][1][0] = 54, @@ -41083,6 +42085,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][0] = 18, [0][1][2][0][RTW89_UK][1][0] = 54, [0][1][2][0][RTW89_UK][0][0] = 18, + [0][1][2][0][RTW89_THAILAND][1][0] = 44, + [0][1][2][0][RTW89_THAILAND][0][0] = -2, [0][1][2][0][RTW89_FCC][1][2] = -4, [0][1][2][0][RTW89_FCC][2][2] = 54, [0][1][2][0][RTW89_ETSI][1][2] = 54, @@ -41090,6 +42094,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][2] = 54, [0][1][2][0][RTW89_MKK][0][2] = 16, [0][1][2][0][RTW89_IC][1][2] = -4, + [0][1][2][0][RTW89_IC][2][2] = 54, [0][1][2][0][RTW89_KCC][1][2] = 12, [0][1][2][0][RTW89_KCC][0][2] = 12, [0][1][2][0][RTW89_ACMA][1][2] = 54, @@ -41099,6 +42104,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][2] = 18, [0][1][2][0][RTW89_UK][1][2] = 54, [0][1][2][0][RTW89_UK][0][2] = 18, + [0][1][2][0][RTW89_THAILAND][1][2] = 44, + [0][1][2][0][RTW89_THAILAND][0][2] = -4, [0][1][2][0][RTW89_FCC][1][4] = -4, [0][1][2][0][RTW89_FCC][2][4] = 54, [0][1][2][0][RTW89_ETSI][1][4] = 54, @@ -41106,6 +42113,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][4] = 54, [0][1][2][0][RTW89_MKK][0][4] = 16, [0][1][2][0][RTW89_IC][1][4] = -4, + [0][1][2][0][RTW89_IC][2][4] = 54, [0][1][2][0][RTW89_KCC][1][4] = 12, [0][1][2][0][RTW89_KCC][0][4] = 12, [0][1][2][0][RTW89_ACMA][1][4] = 54, @@ -41115,6 +42123,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][4] = 18, [0][1][2][0][RTW89_UK][1][4] = 54, [0][1][2][0][RTW89_UK][0][4] = 18, + [0][1][2][0][RTW89_THAILAND][1][4] = 44, + [0][1][2][0][RTW89_THAILAND][0][4] = -4, [0][1][2][0][RTW89_FCC][1][6] = -4, [0][1][2][0][RTW89_FCC][2][6] = 54, [0][1][2][0][RTW89_ETSI][1][6] = 54, @@ -41122,6 +42132,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][6] = 54, [0][1][2][0][RTW89_MKK][0][6] = 16, [0][1][2][0][RTW89_IC][1][6] = -4, + [0][1][2][0][RTW89_IC][2][6] = 54, [0][1][2][0][RTW89_KCC][1][6] = 12, [0][1][2][0][RTW89_KCC][0][6] = 12, [0][1][2][0][RTW89_ACMA][1][6] = 54, @@ -41131,6 +42142,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][6] = 18, [0][1][2][0][RTW89_UK][1][6] = 54, [0][1][2][0][RTW89_UK][0][6] = 18, + [0][1][2][0][RTW89_THAILAND][1][6] = 44, + [0][1][2][0][RTW89_THAILAND][0][6] = -4, [0][1][2][0][RTW89_FCC][1][8] = -4, [0][1][2][0][RTW89_FCC][2][8] = 54, [0][1][2][0][RTW89_ETSI][1][8] = 54, @@ -41138,6 +42151,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][8] = 54, [0][1][2][0][RTW89_MKK][0][8] = 16, [0][1][2][0][RTW89_IC][1][8] = -4, + [0][1][2][0][RTW89_IC][2][8] = 54, [0][1][2][0][RTW89_KCC][1][8] = 12, [0][1][2][0][RTW89_KCC][0][8] = 12, [0][1][2][0][RTW89_ACMA][1][8] = 54, @@ -41147,6 +42161,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][8] = 18, [0][1][2][0][RTW89_UK][1][8] = 54, [0][1][2][0][RTW89_UK][0][8] = 18, + [0][1][2][0][RTW89_THAILAND][1][8] = 44, + [0][1][2][0][RTW89_THAILAND][0][8] = -4, [0][1][2][0][RTW89_FCC][1][10] = -4, [0][1][2][0][RTW89_FCC][2][10] = 54, [0][1][2][0][RTW89_ETSI][1][10] = 54, @@ -41154,6 +42170,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][10] = 54, [0][1][2][0][RTW89_MKK][0][10] = 16, [0][1][2][0][RTW89_IC][1][10] = -4, + [0][1][2][0][RTW89_IC][2][10] = 54, [0][1][2][0][RTW89_KCC][1][10] = 12, [0][1][2][0][RTW89_KCC][0][10] = 12, [0][1][2][0][RTW89_ACMA][1][10] = 54, @@ -41163,6 +42180,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][10] = 18, [0][1][2][0][RTW89_UK][1][10] = 54, [0][1][2][0][RTW89_UK][0][10] = 18, + [0][1][2][0][RTW89_THAILAND][1][10] = 44, + [0][1][2][0][RTW89_THAILAND][0][10] = -4, [0][1][2][0][RTW89_FCC][1][12] = -4, [0][1][2][0][RTW89_FCC][2][12] = 54, [0][1][2][0][RTW89_ETSI][1][12] = 54, @@ -41170,6 +42189,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][12] = 54, [0][1][2][0][RTW89_MKK][0][12] = 16, [0][1][2][0][RTW89_IC][1][12] = -4, + [0][1][2][0][RTW89_IC][2][12] = 54, [0][1][2][0][RTW89_KCC][1][12] = 12, [0][1][2][0][RTW89_KCC][0][12] = 12, [0][1][2][0][RTW89_ACMA][1][12] = 54, @@ -41179,6 +42199,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][12] = 18, [0][1][2][0][RTW89_UK][1][12] = 54, [0][1][2][0][RTW89_UK][0][12] = 18, + [0][1][2][0][RTW89_THAILAND][1][12] = 44, + [0][1][2][0][RTW89_THAILAND][0][12] = -4, [0][1][2][0][RTW89_FCC][1][14] = -4, [0][1][2][0][RTW89_FCC][2][14] = 54, [0][1][2][0][RTW89_ETSI][1][14] = 54, @@ -41186,6 +42208,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][14] = 54, [0][1][2][0][RTW89_MKK][0][14] = 16, [0][1][2][0][RTW89_IC][1][14] = -4, + [0][1][2][0][RTW89_IC][2][14] = 54, [0][1][2][0][RTW89_KCC][1][14] = 12, [0][1][2][0][RTW89_KCC][0][14] = 12, [0][1][2][0][RTW89_ACMA][1][14] = 54, @@ -41195,6 +42218,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][14] = 18, [0][1][2][0][RTW89_UK][1][14] = 54, [0][1][2][0][RTW89_UK][0][14] = 18, + [0][1][2][0][RTW89_THAILAND][1][14] = 44, + [0][1][2][0][RTW89_THAILAND][0][14] = -4, [0][1][2][0][RTW89_FCC][1][15] = -4, [0][1][2][0][RTW89_FCC][2][15] = 54, [0][1][2][0][RTW89_ETSI][1][15] = 54, @@ -41202,6 +42227,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][15] = 54, [0][1][2][0][RTW89_MKK][0][15] = 16, [0][1][2][0][RTW89_IC][1][15] = -4, + 
[0][1][2][0][RTW89_IC][2][15] = 54, [0][1][2][0][RTW89_KCC][1][15] = 12, [0][1][2][0][RTW89_KCC][0][15] = 12, [0][1][2][0][RTW89_ACMA][1][15] = 54, @@ -41211,6 +42237,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][15] = 18, [0][1][2][0][RTW89_UK][1][15] = 54, [0][1][2][0][RTW89_UK][0][15] = 18, + [0][1][2][0][RTW89_THAILAND][1][15] = 44, + [0][1][2][0][RTW89_THAILAND][0][15] = -4, [0][1][2][0][RTW89_FCC][1][17] = -4, [0][1][2][0][RTW89_FCC][2][17] = 54, [0][1][2][0][RTW89_ETSI][1][17] = 54, @@ -41218,6 +42246,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][17] = 54, [0][1][2][0][RTW89_MKK][0][17] = 16, [0][1][2][0][RTW89_IC][1][17] = -4, + [0][1][2][0][RTW89_IC][2][17] = 54, [0][1][2][0][RTW89_KCC][1][17] = 12, [0][1][2][0][RTW89_KCC][0][17] = 12, [0][1][2][0][RTW89_ACMA][1][17] = 54, @@ -41227,6 +42256,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][17] = 18, [0][1][2][0][RTW89_UK][1][17] = 54, [0][1][2][0][RTW89_UK][0][17] = 18, + [0][1][2][0][RTW89_THAILAND][1][17] = 44, + [0][1][2][0][RTW89_THAILAND][0][17] = -4, [0][1][2][0][RTW89_FCC][1][19] = -4, [0][1][2][0][RTW89_FCC][2][19] = 54, [0][1][2][0][RTW89_ETSI][1][19] = 54, @@ -41234,6 +42265,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][19] = 54, [0][1][2][0][RTW89_MKK][0][19] = 16, [0][1][2][0][RTW89_IC][1][19] = -4, + [0][1][2][0][RTW89_IC][2][19] = 54, [0][1][2][0][RTW89_KCC][1][19] = 12, [0][1][2][0][RTW89_KCC][0][19] = 12, [0][1][2][0][RTW89_ACMA][1][19] = 54, @@ -41243,6 +42275,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][19] = 18, [0][1][2][0][RTW89_UK][1][19] = 54, [0][1][2][0][RTW89_UK][0][19] = 18, + [0][1][2][0][RTW89_THAILAND][1][19] = 44, + [0][1][2][0][RTW89_THAILAND][0][19] = -4, [0][1][2][0][RTW89_FCC][1][21] = -4, [0][1][2][0][RTW89_FCC][2][21] = 54, [0][1][2][0][RTW89_ETSI][1][21] = 54, @@ -41250,6 +42284,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][21] = 54, [0][1][2][0][RTW89_MKK][0][21] = 16, [0][1][2][0][RTW89_IC][1][21] = -4, + [0][1][2][0][RTW89_IC][2][21] = 54, [0][1][2][0][RTW89_KCC][1][21] = 12, [0][1][2][0][RTW89_KCC][0][21] = 12, [0][1][2][0][RTW89_ACMA][1][21] = 54, @@ -41259,6 +42294,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][21] = 18, [0][1][2][0][RTW89_UK][1][21] = 54, [0][1][2][0][RTW89_UK][0][21] = 18, + [0][1][2][0][RTW89_THAILAND][1][21] = 44, + [0][1][2][0][RTW89_THAILAND][0][21] = -4, [0][1][2][0][RTW89_FCC][1][23] = -4, [0][1][2][0][RTW89_FCC][2][23] = 68, [0][1][2][0][RTW89_ETSI][1][23] = 54, @@ -41266,6 +42303,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][23] = 54, [0][1][2][0][RTW89_MKK][0][23] = 16, [0][1][2][0][RTW89_IC][1][23] = -4, + [0][1][2][0][RTW89_IC][2][23] = 68, [0][1][2][0][RTW89_KCC][1][23] = 12, [0][1][2][0][RTW89_KCC][0][23] = 10, [0][1][2][0][RTW89_ACMA][1][23] = 54, @@ -41275,6 +42313,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][23] = 18, [0][1][2][0][RTW89_UK][1][23] = 54, [0][1][2][0][RTW89_UK][0][23] = 18, + [0][1][2][0][RTW89_THAILAND][1][23] = 44, + [0][1][2][0][RTW89_THAILAND][0][23] = -4, [0][1][2][0][RTW89_FCC][1][25] = -4, [0][1][2][0][RTW89_FCC][2][25] = 68, [0][1][2][0][RTW89_ETSI][1][25] = 54, 
@@ -41282,6 +42322,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][25] = 54, [0][1][2][0][RTW89_MKK][0][25] = 16, [0][1][2][0][RTW89_IC][1][25] = -4, + [0][1][2][0][RTW89_IC][2][25] = 68, [0][1][2][0][RTW89_KCC][1][25] = 12, [0][1][2][0][RTW89_KCC][0][25] = 14, [0][1][2][0][RTW89_ACMA][1][25] = 54, @@ -41291,6 +42332,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][25] = 18, [0][1][2][0][RTW89_UK][1][25] = 54, [0][1][2][0][RTW89_UK][0][25] = 18, + [0][1][2][0][RTW89_THAILAND][1][25] = 42, + [0][1][2][0][RTW89_THAILAND][0][25] = -4, [0][1][2][0][RTW89_FCC][1][27] = -4, [0][1][2][0][RTW89_FCC][2][27] = 68, [0][1][2][0][RTW89_ETSI][1][27] = 54, @@ -41298,6 +42341,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][27] = 54, [0][1][2][0][RTW89_MKK][0][27] = 16, [0][1][2][0][RTW89_IC][1][27] = -4, + [0][1][2][0][RTW89_IC][2][27] = 68, [0][1][2][0][RTW89_KCC][1][27] = 12, [0][1][2][0][RTW89_KCC][0][27] = 14, [0][1][2][0][RTW89_ACMA][1][27] = 54, @@ -41307,6 +42351,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][27] = 18, [0][1][2][0][RTW89_UK][1][27] = 54, [0][1][2][0][RTW89_UK][0][27] = 18, + [0][1][2][0][RTW89_THAILAND][1][27] = 42, + [0][1][2][0][RTW89_THAILAND][0][27] = -4, [0][1][2][0][RTW89_FCC][1][29] = -4, [0][1][2][0][RTW89_FCC][2][29] = 68, [0][1][2][0][RTW89_ETSI][1][29] = 54, @@ -41314,6 +42360,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][29] = 54, [0][1][2][0][RTW89_MKK][0][29] = 16, [0][1][2][0][RTW89_IC][1][29] = -4, + [0][1][2][0][RTW89_IC][2][29] = 68, [0][1][2][0][RTW89_KCC][1][29] = 12, [0][1][2][0][RTW89_KCC][0][29] = 14, [0][1][2][0][RTW89_ACMA][1][29] = 54, @@ -41323,6 +42370,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][29] = 18, [0][1][2][0][RTW89_UK][1][29] = 54, [0][1][2][0][RTW89_UK][0][29] = 18, + [0][1][2][0][RTW89_THAILAND][1][29] = 42, + [0][1][2][0][RTW89_THAILAND][0][29] = -4, [0][1][2][0][RTW89_FCC][1][30] = -4, [0][1][2][0][RTW89_FCC][2][30] = 68, [0][1][2][0][RTW89_ETSI][1][30] = 54, @@ -41330,6 +42379,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][30] = 54, [0][1][2][0][RTW89_MKK][0][30] = 16, [0][1][2][0][RTW89_IC][1][30] = -4, + [0][1][2][0][RTW89_IC][2][30] = 68, [0][1][2][0][RTW89_KCC][1][30] = 12, [0][1][2][0][RTW89_KCC][0][30] = 14, [0][1][2][0][RTW89_ACMA][1][30] = 54, @@ -41339,6 +42389,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][30] = 18, [0][1][2][0][RTW89_UK][1][30] = 54, [0][1][2][0][RTW89_UK][0][30] = 18, + [0][1][2][0][RTW89_THAILAND][1][30] = 42, + [0][1][2][0][RTW89_THAILAND][0][30] = -4, [0][1][2][0][RTW89_FCC][1][32] = -4, [0][1][2][0][RTW89_FCC][2][32] = 68, [0][1][2][0][RTW89_ETSI][1][32] = 54, @@ -41346,6 +42398,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][32] = 54, [0][1][2][0][RTW89_MKK][0][32] = 16, [0][1][2][0][RTW89_IC][1][32] = -4, + [0][1][2][0][RTW89_IC][2][32] = 68, [0][1][2][0][RTW89_KCC][1][32] = 12, [0][1][2][0][RTW89_KCC][0][32] = 14, [0][1][2][0][RTW89_ACMA][1][32] = 54, @@ -41355,6 +42408,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][32] = 18, [0][1][2][0][RTW89_UK][1][32] = 54, [0][1][2][0][RTW89_UK][0][32] = 18, 
+ [0][1][2][0][RTW89_THAILAND][1][32] = 42, + [0][1][2][0][RTW89_THAILAND][0][32] = -4, [0][1][2][0][RTW89_FCC][1][34] = -4, [0][1][2][0][RTW89_FCC][2][34] = 68, [0][1][2][0][RTW89_ETSI][1][34] = 54, @@ -41362,6 +42417,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][34] = 54, [0][1][2][0][RTW89_MKK][0][34] = 16, [0][1][2][0][RTW89_IC][1][34] = -4, + [0][1][2][0][RTW89_IC][2][34] = 68, [0][1][2][0][RTW89_KCC][1][34] = 12, [0][1][2][0][RTW89_KCC][0][34] = 14, [0][1][2][0][RTW89_ACMA][1][34] = 54, @@ -41371,6 +42427,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][34] = 18, [0][1][2][0][RTW89_UK][1][34] = 54, [0][1][2][0][RTW89_UK][0][34] = 18, + [0][1][2][0][RTW89_THAILAND][1][34] = 42, + [0][1][2][0][RTW89_THAILAND][0][34] = -4, [0][1][2][0][RTW89_FCC][1][36] = -4, [0][1][2][0][RTW89_FCC][2][36] = 68, [0][1][2][0][RTW89_ETSI][1][36] = 54, @@ -41378,6 +42436,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][36] = 54, [0][1][2][0][RTW89_MKK][0][36] = 16, [0][1][2][0][RTW89_IC][1][36] = -4, + [0][1][2][0][RTW89_IC][2][36] = 68, [0][1][2][0][RTW89_KCC][1][36] = 12, [0][1][2][0][RTW89_KCC][0][36] = 14, [0][1][2][0][RTW89_ACMA][1][36] = 54, @@ -41387,6 +42446,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][36] = 18, [0][1][2][0][RTW89_UK][1][36] = 54, [0][1][2][0][RTW89_UK][0][36] = 18, + [0][1][2][0][RTW89_THAILAND][1][36] = 42, + [0][1][2][0][RTW89_THAILAND][0][36] = -4, [0][1][2][0][RTW89_FCC][1][38] = -4, [0][1][2][0][RTW89_FCC][2][38] = 68, [0][1][2][0][RTW89_ETSI][1][38] = 54, @@ -41394,6 +42455,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][38] = 54, [0][1][2][0][RTW89_MKK][0][38] = 16, [0][1][2][0][RTW89_IC][1][38] = -4, + [0][1][2][0][RTW89_IC][2][38] = 68, [0][1][2][0][RTW89_KCC][1][38] = 12, [0][1][2][0][RTW89_KCC][0][38] = 14, [0][1][2][0][RTW89_ACMA][1][38] = 54, @@ -41403,6 +42465,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][38] = 18, [0][1][2][0][RTW89_UK][1][38] = 54, [0][1][2][0][RTW89_UK][0][38] = 18, + [0][1][2][0][RTW89_THAILAND][1][38] = 42, + [0][1][2][0][RTW89_THAILAND][0][38] = -4, [0][1][2][0][RTW89_FCC][1][40] = -4, [0][1][2][0][RTW89_FCC][2][40] = 68, [0][1][2][0][RTW89_ETSI][1][40] = 54, @@ -41410,6 +42474,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][40] = 54, [0][1][2][0][RTW89_MKK][0][40] = 16, [0][1][2][0][RTW89_IC][1][40] = -4, + [0][1][2][0][RTW89_IC][2][40] = 68, [0][1][2][0][RTW89_KCC][1][40] = 12, [0][1][2][0][RTW89_KCC][0][40] = 14, [0][1][2][0][RTW89_ACMA][1][40] = 54, @@ -41419,6 +42484,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][40] = 18, [0][1][2][0][RTW89_UK][1][40] = 54, [0][1][2][0][RTW89_UK][0][40] = 18, + [0][1][2][0][RTW89_THAILAND][1][40] = 42, + [0][1][2][0][RTW89_THAILAND][0][40] = -4, [0][1][2][0][RTW89_FCC][1][42] = -4, [0][1][2][0][RTW89_FCC][2][42] = 68, [0][1][2][0][RTW89_ETSI][1][42] = 54, @@ -41426,6 +42493,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][42] = 54, [0][1][2][0][RTW89_MKK][0][42] = 16, [0][1][2][0][RTW89_IC][1][42] = -4, + [0][1][2][0][RTW89_IC][2][42] = 68, [0][1][2][0][RTW89_KCC][1][42] = 12, [0][1][2][0][RTW89_KCC][0][42] = 14, [0][1][2][0][RTW89_ACMA][1][42] = 54, 
@@ -41435,6 +42503,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][42] = 18, [0][1][2][0][RTW89_UK][1][42] = 54, [0][1][2][0][RTW89_UK][0][42] = 18, + [0][1][2][0][RTW89_THAILAND][1][42] = 42, + [0][1][2][0][RTW89_THAILAND][0][42] = -4, [0][1][2][0][RTW89_FCC][1][44] = -2, [0][1][2][0][RTW89_FCC][2][44] = 68, [0][1][2][0][RTW89_ETSI][1][44] = 54, @@ -41442,6 +42512,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][44] = 34, [0][1][2][0][RTW89_MKK][0][44] = 16, [0][1][2][0][RTW89_IC][1][44] = -2, + [0][1][2][0][RTW89_IC][2][44] = 68, [0][1][2][0][RTW89_KCC][1][44] = 12, [0][1][2][0][RTW89_KCC][0][44] = 12, [0][1][2][0][RTW89_ACMA][1][44] = 54, @@ -41451,6 +42522,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][44] = 18, [0][1][2][0][RTW89_UK][1][44] = 54, [0][1][2][0][RTW89_UK][0][44] = 18, + [0][1][2][0][RTW89_THAILAND][1][44] = 42, + [0][1][2][0][RTW89_THAILAND][0][44] = -2, [0][1][2][0][RTW89_FCC][1][45] = -2, [0][1][2][0][RTW89_FCC][2][45] = 127, [0][1][2][0][RTW89_ETSI][1][45] = 127, @@ -41458,6 +42531,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][45] = 127, [0][1][2][0][RTW89_MKK][0][45] = 127, [0][1][2][0][RTW89_IC][1][45] = -2, + [0][1][2][0][RTW89_IC][2][45] = 70, [0][1][2][0][RTW89_KCC][1][45] = 12, [0][1][2][0][RTW89_KCC][0][45] = 127, [0][1][2][0][RTW89_ACMA][1][45] = 127, @@ -41467,6 +42541,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][45] = 127, [0][1][2][0][RTW89_UK][1][45] = 127, [0][1][2][0][RTW89_UK][0][45] = 127, + [0][1][2][0][RTW89_THAILAND][1][45] = 127, + [0][1][2][0][RTW89_THAILAND][0][45] = 127, [0][1][2][0][RTW89_FCC][1][47] = -2, [0][1][2][0][RTW89_FCC][2][47] = 127, [0][1][2][0][RTW89_ETSI][1][47] = 127, @@ -41474,6 +42550,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][47] = 127, [0][1][2][0][RTW89_MKK][0][47] = 127, [0][1][2][0][RTW89_IC][1][47] = -2, + [0][1][2][0][RTW89_IC][2][47] = 68, [0][1][2][0][RTW89_KCC][1][47] = 12, [0][1][2][0][RTW89_KCC][0][47] = 127, [0][1][2][0][RTW89_ACMA][1][47] = 127, @@ -41483,6 +42560,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][47] = 127, [0][1][2][0][RTW89_UK][1][47] = 127, [0][1][2][0][RTW89_UK][0][47] = 127, + [0][1][2][0][RTW89_THAILAND][1][47] = 127, + [0][1][2][0][RTW89_THAILAND][0][47] = 127, [0][1][2][0][RTW89_FCC][1][49] = -2, [0][1][2][0][RTW89_FCC][2][49] = 127, [0][1][2][0][RTW89_ETSI][1][49] = 127, @@ -41490,6 +42569,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][49] = 127, [0][1][2][0][RTW89_MKK][0][49] = 127, [0][1][2][0][RTW89_IC][1][49] = -2, + [0][1][2][0][RTW89_IC][2][49] = 68, [0][1][2][0][RTW89_KCC][1][49] = 12, [0][1][2][0][RTW89_KCC][0][49] = 127, [0][1][2][0][RTW89_ACMA][1][49] = 127, @@ -41499,6 +42579,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][49] = 127, [0][1][2][0][RTW89_UK][1][49] = 127, [0][1][2][0][RTW89_UK][0][49] = 127, + [0][1][2][0][RTW89_THAILAND][1][49] = 127, + [0][1][2][0][RTW89_THAILAND][0][49] = 127, [0][1][2][0][RTW89_FCC][1][51] = -2, [0][1][2][0][RTW89_FCC][2][51] = 127, [0][1][2][0][RTW89_ETSI][1][51] = 127, @@ -41506,6 +42588,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][0][RTW89_MKK][1][51] = 127, [0][1][2][0][RTW89_MKK][0][51] = 127, [0][1][2][0][RTW89_IC][1][51] = -2, + [0][1][2][0][RTW89_IC][2][51] = 68, [0][1][2][0][RTW89_KCC][1][51] = 12, [0][1][2][0][RTW89_KCC][0][51] = 127, [0][1][2][0][RTW89_ACMA][1][51] = 127, @@ -41515,6 +42598,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][51] = 127, [0][1][2][0][RTW89_UK][1][51] = 127, [0][1][2][0][RTW89_UK][0][51] = 127, + [0][1][2][0][RTW89_THAILAND][1][51] = 127, + [0][1][2][0][RTW89_THAILAND][0][51] = 127, [0][1][2][0][RTW89_FCC][1][53] = -2, [0][1][2][0][RTW89_FCC][2][53] = 127, [0][1][2][0][RTW89_ETSI][1][53] = 127, @@ -41522,6 +42607,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][53] = 127, [0][1][2][0][RTW89_MKK][0][53] = 127, [0][1][2][0][RTW89_IC][1][53] = -2, + [0][1][2][0][RTW89_IC][2][53] = 68, [0][1][2][0][RTW89_KCC][1][53] = 12, [0][1][2][0][RTW89_KCC][0][53] = 127, [0][1][2][0][RTW89_ACMA][1][53] = 127, @@ -41531,6 +42617,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][53] = 127, [0][1][2][0][RTW89_UK][1][53] = 127, [0][1][2][0][RTW89_UK][0][53] = 127, + [0][1][2][0][RTW89_THAILAND][1][53] = 127, + [0][1][2][0][RTW89_THAILAND][0][53] = 127, [0][1][2][0][RTW89_FCC][1][55] = -2, [0][1][2][0][RTW89_FCC][2][55] = 68, [0][1][2][0][RTW89_ETSI][1][55] = 127, @@ -41538,6 +42626,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][55] = 127, [0][1][2][0][RTW89_MKK][0][55] = 127, [0][1][2][0][RTW89_IC][1][55] = -2, + [0][1][2][0][RTW89_IC][2][55] = 68, [0][1][2][0][RTW89_KCC][1][55] = 12, [0][1][2][0][RTW89_KCC][0][55] = 127, [0][1][2][0][RTW89_ACMA][1][55] = 127, @@ -41547,6 +42636,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][55] = 127, [0][1][2][0][RTW89_UK][1][55] = 127, [0][1][2][0][RTW89_UK][0][55] = 127, + [0][1][2][0][RTW89_THAILAND][1][55] = 127, + [0][1][2][0][RTW89_THAILAND][0][55] = 127, [0][1][2][0][RTW89_FCC][1][57] = -2, [0][1][2][0][RTW89_FCC][2][57] = 68, [0][1][2][0][RTW89_ETSI][1][57] = 127, @@ -41554,6 +42645,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][57] = 127, [0][1][2][0][RTW89_MKK][0][57] = 127, [0][1][2][0][RTW89_IC][1][57] = -2, + [0][1][2][0][RTW89_IC][2][57] = 68, [0][1][2][0][RTW89_KCC][1][57] = 12, [0][1][2][0][RTW89_KCC][0][57] = 127, [0][1][2][0][RTW89_ACMA][1][57] = 127, @@ -41563,6 +42655,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][57] = 127, [0][1][2][0][RTW89_UK][1][57] = 127, [0][1][2][0][RTW89_UK][0][57] = 127, + [0][1][2][0][RTW89_THAILAND][1][57] = 127, + [0][1][2][0][RTW89_THAILAND][0][57] = 127, [0][1][2][0][RTW89_FCC][1][59] = -2, [0][1][2][0][RTW89_FCC][2][59] = 68, [0][1][2][0][RTW89_ETSI][1][59] = 127, @@ -41570,6 +42664,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][59] = 127, [0][1][2][0][RTW89_MKK][0][59] = 127, [0][1][2][0][RTW89_IC][1][59] = -2, + [0][1][2][0][RTW89_IC][2][59] = 68, [0][1][2][0][RTW89_KCC][1][59] = 12, [0][1][2][0][RTW89_KCC][0][59] = 127, [0][1][2][0][RTW89_ACMA][1][59] = 127, @@ -41579,6 +42674,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][59] = 127, [0][1][2][0][RTW89_UK][1][59] = 127, [0][1][2][0][RTW89_UK][0][59] = 127, + [0][1][2][0][RTW89_THAILAND][1][59] = 
127, + [0][1][2][0][RTW89_THAILAND][0][59] = 127, [0][1][2][0][RTW89_FCC][1][60] = -2, [0][1][2][0][RTW89_FCC][2][60] = 68, [0][1][2][0][RTW89_ETSI][1][60] = 127, @@ -41586,6 +42683,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][60] = 127, [0][1][2][0][RTW89_MKK][0][60] = 127, [0][1][2][0][RTW89_IC][1][60] = -2, + [0][1][2][0][RTW89_IC][2][60] = 68, [0][1][2][0][RTW89_KCC][1][60] = 12, [0][1][2][0][RTW89_KCC][0][60] = 127, [0][1][2][0][RTW89_ACMA][1][60] = 127, @@ -41595,6 +42693,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][60] = 127, [0][1][2][0][RTW89_UK][1][60] = 127, [0][1][2][0][RTW89_UK][0][60] = 127, + [0][1][2][0][RTW89_THAILAND][1][60] = 127, + [0][1][2][0][RTW89_THAILAND][0][60] = 127, [0][1][2][0][RTW89_FCC][1][62] = -2, [0][1][2][0][RTW89_FCC][2][62] = 68, [0][1][2][0][RTW89_ETSI][1][62] = 127, @@ -41602,6 +42702,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][62] = 127, [0][1][2][0][RTW89_MKK][0][62] = 127, [0][1][2][0][RTW89_IC][1][62] = -2, + [0][1][2][0][RTW89_IC][2][62] = 68, [0][1][2][0][RTW89_KCC][1][62] = 12, [0][1][2][0][RTW89_KCC][0][62] = 127, [0][1][2][0][RTW89_ACMA][1][62] = 127, @@ -41611,6 +42712,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][62] = 127, [0][1][2][0][RTW89_UK][1][62] = 127, [0][1][2][0][RTW89_UK][0][62] = 127, + [0][1][2][0][RTW89_THAILAND][1][62] = 127, + [0][1][2][0][RTW89_THAILAND][0][62] = 127, [0][1][2][0][RTW89_FCC][1][64] = -2, [0][1][2][0][RTW89_FCC][2][64] = 68, [0][1][2][0][RTW89_ETSI][1][64] = 127, @@ -41618,6 +42721,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][64] = 127, [0][1][2][0][RTW89_MKK][0][64] = 127, [0][1][2][0][RTW89_IC][1][64] = -2, + [0][1][2][0][RTW89_IC][2][64] = 68, [0][1][2][0][RTW89_KCC][1][64] = 12, [0][1][2][0][RTW89_KCC][0][64] = 127, [0][1][2][0][RTW89_ACMA][1][64] = 127, @@ -41627,6 +42731,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][64] = 127, [0][1][2][0][RTW89_UK][1][64] = 127, [0][1][2][0][RTW89_UK][0][64] = 127, + [0][1][2][0][RTW89_THAILAND][1][64] = 127, + [0][1][2][0][RTW89_THAILAND][0][64] = 127, [0][1][2][0][RTW89_FCC][1][66] = -2, [0][1][2][0][RTW89_FCC][2][66] = 68, [0][1][2][0][RTW89_ETSI][1][66] = 127, @@ -41634,6 +42740,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][66] = 127, [0][1][2][0][RTW89_MKK][0][66] = 127, [0][1][2][0][RTW89_IC][1][66] = -2, + [0][1][2][0][RTW89_IC][2][66] = 68, [0][1][2][0][RTW89_KCC][1][66] = 12, [0][1][2][0][RTW89_KCC][0][66] = 127, [0][1][2][0][RTW89_ACMA][1][66] = 127, @@ -41643,6 +42750,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][66] = 127, [0][1][2][0][RTW89_UK][1][66] = 127, [0][1][2][0][RTW89_UK][0][66] = 127, + [0][1][2][0][RTW89_THAILAND][1][66] = 127, + [0][1][2][0][RTW89_THAILAND][0][66] = 127, [0][1][2][0][RTW89_FCC][1][68] = -2, [0][1][2][0][RTW89_FCC][2][68] = 68, [0][1][2][0][RTW89_ETSI][1][68] = 127, @@ -41650,6 +42759,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][68] = 127, [0][1][2][0][RTW89_MKK][0][68] = 127, [0][1][2][0][RTW89_IC][1][68] = -2, + [0][1][2][0][RTW89_IC][2][68] = 68, [0][1][2][0][RTW89_KCC][1][68] = 12, [0][1][2][0][RTW89_KCC][0][68] = 127, [0][1][2][0][RTW89_ACMA][1][68] 
= 127, @@ -41659,6 +42769,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][68] = 127, [0][1][2][0][RTW89_UK][1][68] = 127, [0][1][2][0][RTW89_UK][0][68] = 127, + [0][1][2][0][RTW89_THAILAND][1][68] = 127, + [0][1][2][0][RTW89_THAILAND][0][68] = 127, [0][1][2][0][RTW89_FCC][1][70] = -2, [0][1][2][0][RTW89_FCC][2][70] = 68, [0][1][2][0][RTW89_ETSI][1][70] = 127, @@ -41666,6 +42778,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][70] = 127, [0][1][2][0][RTW89_MKK][0][70] = 127, [0][1][2][0][RTW89_IC][1][70] = -2, + [0][1][2][0][RTW89_IC][2][70] = 68, [0][1][2][0][RTW89_KCC][1][70] = 12, [0][1][2][0][RTW89_KCC][0][70] = 127, [0][1][2][0][RTW89_ACMA][1][70] = 127, @@ -41675,6 +42788,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][70] = 127, [0][1][2][0][RTW89_UK][1][70] = 127, [0][1][2][0][RTW89_UK][0][70] = 127, + [0][1][2][0][RTW89_THAILAND][1][70] = 127, + [0][1][2][0][RTW89_THAILAND][0][70] = 127, [0][1][2][0][RTW89_FCC][1][72] = -2, [0][1][2][0][RTW89_FCC][2][72] = 68, [0][1][2][0][RTW89_ETSI][1][72] = 127, @@ -41682,6 +42797,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][72] = 127, [0][1][2][0][RTW89_MKK][0][72] = 127, [0][1][2][0][RTW89_IC][1][72] = -2, + [0][1][2][0][RTW89_IC][2][72] = 68, [0][1][2][0][RTW89_KCC][1][72] = 12, [0][1][2][0][RTW89_KCC][0][72] = 127, [0][1][2][0][RTW89_ACMA][1][72] = 127, @@ -41691,6 +42807,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][72] = 127, [0][1][2][0][RTW89_UK][1][72] = 127, [0][1][2][0][RTW89_UK][0][72] = 127, + [0][1][2][0][RTW89_THAILAND][1][72] = 127, + [0][1][2][0][RTW89_THAILAND][0][72] = 127, [0][1][2][0][RTW89_FCC][1][74] = -2, [0][1][2][0][RTW89_FCC][2][74] = 68, [0][1][2][0][RTW89_ETSI][1][74] = 127, @@ -41698,6 +42816,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][74] = 127, [0][1][2][0][RTW89_MKK][0][74] = 127, [0][1][2][0][RTW89_IC][1][74] = -2, + [0][1][2][0][RTW89_IC][2][74] = 68, [0][1][2][0][RTW89_KCC][1][74] = 12, [0][1][2][0][RTW89_KCC][0][74] = 127, [0][1][2][0][RTW89_ACMA][1][74] = 127, @@ -41707,6 +42826,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][74] = 127, [0][1][2][0][RTW89_UK][1][74] = 127, [0][1][2][0][RTW89_UK][0][74] = 127, + [0][1][2][0][RTW89_THAILAND][1][74] = 127, + [0][1][2][0][RTW89_THAILAND][0][74] = 127, [0][1][2][0][RTW89_FCC][1][75] = -2, [0][1][2][0][RTW89_FCC][2][75] = 68, [0][1][2][0][RTW89_ETSI][1][75] = 127, @@ -41714,6 +42835,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][75] = 127, [0][1][2][0][RTW89_MKK][0][75] = 127, [0][1][2][0][RTW89_IC][1][75] = -2, + [0][1][2][0][RTW89_IC][2][75] = 68, [0][1][2][0][RTW89_KCC][1][75] = 12, [0][1][2][0][RTW89_KCC][0][75] = 127, [0][1][2][0][RTW89_ACMA][1][75] = 127, @@ -41723,6 +42845,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][75] = 127, [0][1][2][0][RTW89_UK][1][75] = 127, [0][1][2][0][RTW89_UK][0][75] = 127, + [0][1][2][0][RTW89_THAILAND][1][75] = 127, + [0][1][2][0][RTW89_THAILAND][0][75] = 127, [0][1][2][0][RTW89_FCC][1][77] = -2, [0][1][2][0][RTW89_FCC][2][77] = 68, [0][1][2][0][RTW89_ETSI][1][77] = 127, @@ -41730,6 +42854,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][0][RTW89_MKK][1][77] = 127, [0][1][2][0][RTW89_MKK][0][77] = 127, [0][1][2][0][RTW89_IC][1][77] = -2, + [0][1][2][0][RTW89_IC][2][77] = 68, [0][1][2][0][RTW89_KCC][1][77] = 12, [0][1][2][0][RTW89_KCC][0][77] = 127, [0][1][2][0][RTW89_ACMA][1][77] = 127, @@ -41739,6 +42864,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][77] = 127, [0][1][2][0][RTW89_UK][1][77] = 127, [0][1][2][0][RTW89_UK][0][77] = 127, + [0][1][2][0][RTW89_THAILAND][1][77] = 127, + [0][1][2][0][RTW89_THAILAND][0][77] = 127, [0][1][2][0][RTW89_FCC][1][79] = -2, [0][1][2][0][RTW89_FCC][2][79] = 68, [0][1][2][0][RTW89_ETSI][1][79] = 127, @@ -41746,6 +42873,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][79] = 127, [0][1][2][0][RTW89_MKK][0][79] = 127, [0][1][2][0][RTW89_IC][1][79] = -2, + [0][1][2][0][RTW89_IC][2][79] = 68, [0][1][2][0][RTW89_KCC][1][79] = 12, [0][1][2][0][RTW89_KCC][0][79] = 127, [0][1][2][0][RTW89_ACMA][1][79] = 127, @@ -41755,6 +42883,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][79] = 127, [0][1][2][0][RTW89_UK][1][79] = 127, [0][1][2][0][RTW89_UK][0][79] = 127, + [0][1][2][0][RTW89_THAILAND][1][79] = 127, + [0][1][2][0][RTW89_THAILAND][0][79] = 127, [0][1][2][0][RTW89_FCC][1][81] = -2, [0][1][2][0][RTW89_FCC][2][81] = 68, [0][1][2][0][RTW89_ETSI][1][81] = 127, @@ -41762,6 +42892,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][81] = 127, [0][1][2][0][RTW89_MKK][0][81] = 127, [0][1][2][0][RTW89_IC][1][81] = -2, + [0][1][2][0][RTW89_IC][2][81] = 68, [0][1][2][0][RTW89_KCC][1][81] = 12, [0][1][2][0][RTW89_KCC][0][81] = 127, [0][1][2][0][RTW89_ACMA][1][81] = 127, @@ -41771,6 +42902,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][81] = 127, [0][1][2][0][RTW89_UK][1][81] = 127, [0][1][2][0][RTW89_UK][0][81] = 127, + [0][1][2][0][RTW89_THAILAND][1][81] = 127, + [0][1][2][0][RTW89_THAILAND][0][81] = 127, [0][1][2][0][RTW89_FCC][1][83] = -2, [0][1][2][0][RTW89_FCC][2][83] = 68, [0][1][2][0][RTW89_ETSI][1][83] = 127, @@ -41778,6 +42911,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][83] = 127, [0][1][2][0][RTW89_MKK][0][83] = 127, [0][1][2][0][RTW89_IC][1][83] = -2, + [0][1][2][0][RTW89_IC][2][83] = 68, [0][1][2][0][RTW89_KCC][1][83] = 20, [0][1][2][0][RTW89_KCC][0][83] = 127, [0][1][2][0][RTW89_ACMA][1][83] = 127, @@ -41787,6 +42921,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][83] = 127, [0][1][2][0][RTW89_UK][1][83] = 127, [0][1][2][0][RTW89_UK][0][83] = 127, + [0][1][2][0][RTW89_THAILAND][1][83] = 127, + [0][1][2][0][RTW89_THAILAND][0][83] = 127, [0][1][2][0][RTW89_FCC][1][85] = -2, [0][1][2][0][RTW89_FCC][2][85] = 68, [0][1][2][0][RTW89_ETSI][1][85] = 127, @@ -41794,6 +42930,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][85] = 127, [0][1][2][0][RTW89_MKK][0][85] = 127, [0][1][2][0][RTW89_IC][1][85] = -2, + [0][1][2][0][RTW89_IC][2][85] = 68, [0][1][2][0][RTW89_KCC][1][85] = 20, [0][1][2][0][RTW89_KCC][0][85] = 127, [0][1][2][0][RTW89_ACMA][1][85] = 127, @@ -41803,6 +42940,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][85] = 127, [0][1][2][0][RTW89_UK][1][85] = 127, [0][1][2][0][RTW89_UK][0][85] = 127, + [0][1][2][0][RTW89_THAILAND][1][85] = 
127, + [0][1][2][0][RTW89_THAILAND][0][85] = 127, [0][1][2][0][RTW89_FCC][1][87] = -2, [0][1][2][0][RTW89_FCC][2][87] = 127, [0][1][2][0][RTW89_ETSI][1][87] = 127, @@ -41810,6 +42949,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][87] = 127, [0][1][2][0][RTW89_MKK][0][87] = 127, [0][1][2][0][RTW89_IC][1][87] = -2, + [0][1][2][0][RTW89_IC][2][87] = 127, [0][1][2][0][RTW89_KCC][1][87] = 20, [0][1][2][0][RTW89_KCC][0][87] = 127, [0][1][2][0][RTW89_ACMA][1][87] = 127, @@ -41819,6 +42959,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][87] = 127, [0][1][2][0][RTW89_UK][1][87] = 127, [0][1][2][0][RTW89_UK][0][87] = 127, + [0][1][2][0][RTW89_THAILAND][1][87] = 127, + [0][1][2][0][RTW89_THAILAND][0][87] = 127, [0][1][2][0][RTW89_FCC][1][89] = -2, [0][1][2][0][RTW89_FCC][2][89] = 127, [0][1][2][0][RTW89_ETSI][1][89] = 127, @@ -41826,6 +42968,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][89] = 127, [0][1][2][0][RTW89_MKK][0][89] = 127, [0][1][2][0][RTW89_IC][1][89] = -2, + [0][1][2][0][RTW89_IC][2][89] = 127, [0][1][2][0][RTW89_KCC][1][89] = 20, [0][1][2][0][RTW89_KCC][0][89] = 127, [0][1][2][0][RTW89_ACMA][1][89] = 127, @@ -41835,6 +42978,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][89] = 127, [0][1][2][0][RTW89_UK][1][89] = 127, [0][1][2][0][RTW89_UK][0][89] = 127, + [0][1][2][0][RTW89_THAILAND][1][89] = 127, + [0][1][2][0][RTW89_THAILAND][0][89] = 127, [0][1][2][0][RTW89_FCC][1][90] = -2, [0][1][2][0][RTW89_FCC][2][90] = 127, [0][1][2][0][RTW89_ETSI][1][90] = 127, @@ -41842,6 +42987,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][90] = 127, [0][1][2][0][RTW89_MKK][0][90] = 127, [0][1][2][0][RTW89_IC][1][90] = -2, + [0][1][2][0][RTW89_IC][2][90] = 127, [0][1][2][0][RTW89_KCC][1][90] = 20, [0][1][2][0][RTW89_KCC][0][90] = 127, [0][1][2][0][RTW89_ACMA][1][90] = 127, @@ -41851,6 +42997,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][90] = 127, [0][1][2][0][RTW89_UK][1][90] = 127, [0][1][2][0][RTW89_UK][0][90] = 127, + [0][1][2][0][RTW89_THAILAND][1][90] = 127, + [0][1][2][0][RTW89_THAILAND][0][90] = 127, [0][1][2][0][RTW89_FCC][1][92] = -2, [0][1][2][0][RTW89_FCC][2][92] = 127, [0][1][2][0][RTW89_ETSI][1][92] = 127, @@ -41858,6 +43006,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][92] = 127, [0][1][2][0][RTW89_MKK][0][92] = 127, [0][1][2][0][RTW89_IC][1][92] = -2, + [0][1][2][0][RTW89_IC][2][92] = 127, [0][1][2][0][RTW89_KCC][1][92] = 20, [0][1][2][0][RTW89_KCC][0][92] = 127, [0][1][2][0][RTW89_ACMA][1][92] = 127, @@ -41867,6 +43016,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][92] = 127, [0][1][2][0][RTW89_UK][1][92] = 127, [0][1][2][0][RTW89_UK][0][92] = 127, + [0][1][2][0][RTW89_THAILAND][1][92] = 127, + [0][1][2][0][RTW89_THAILAND][0][92] = 127, [0][1][2][0][RTW89_FCC][1][94] = -2, [0][1][2][0][RTW89_FCC][2][94] = 127, [0][1][2][0][RTW89_ETSI][1][94] = 127, @@ -41874,6 +43025,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][94] = 127, [0][1][2][0][RTW89_MKK][0][94] = 127, [0][1][2][0][RTW89_IC][1][94] = -2, + [0][1][2][0][RTW89_IC][2][94] = 127, [0][1][2][0][RTW89_KCC][1][94] = 20, [0][1][2][0][RTW89_KCC][0][94] = 127, 
[0][1][2][0][RTW89_ACMA][1][94] = 127, @@ -41883,6 +43035,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][94] = 127, [0][1][2][0][RTW89_UK][1][94] = 127, [0][1][2][0][RTW89_UK][0][94] = 127, + [0][1][2][0][RTW89_THAILAND][1][94] = 127, + [0][1][2][0][RTW89_THAILAND][0][94] = 127, [0][1][2][0][RTW89_FCC][1][96] = -2, [0][1][2][0][RTW89_FCC][2][96] = 127, [0][1][2][0][RTW89_ETSI][1][96] = 127, @@ -41890,6 +43044,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][96] = 127, [0][1][2][0][RTW89_MKK][0][96] = 127, [0][1][2][0][RTW89_IC][1][96] = -2, + [0][1][2][0][RTW89_IC][2][96] = 127, [0][1][2][0][RTW89_KCC][1][96] = 20, [0][1][2][0][RTW89_KCC][0][96] = 127, [0][1][2][0][RTW89_ACMA][1][96] = 127, @@ -41899,6 +43054,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][96] = 127, [0][1][2][0][RTW89_UK][1][96] = 127, [0][1][2][0][RTW89_UK][0][96] = 127, + [0][1][2][0][RTW89_THAILAND][1][96] = 127, + [0][1][2][0][RTW89_THAILAND][0][96] = 127, [0][1][2][0][RTW89_FCC][1][98] = -2, [0][1][2][0][RTW89_FCC][2][98] = 127, [0][1][2][0][RTW89_ETSI][1][98] = 127, @@ -41906,6 +43063,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][98] = 127, [0][1][2][0][RTW89_MKK][0][98] = 127, [0][1][2][0][RTW89_IC][1][98] = -2, + [0][1][2][0][RTW89_IC][2][98] = 127, [0][1][2][0][RTW89_KCC][1][98] = 20, [0][1][2][0][RTW89_KCC][0][98] = 127, [0][1][2][0][RTW89_ACMA][1][98] = 127, @@ -41915,6 +43073,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][98] = 127, [0][1][2][0][RTW89_UK][1][98] = 127, [0][1][2][0][RTW89_UK][0][98] = 127, + [0][1][2][0][RTW89_THAILAND][1][98] = 127, + [0][1][2][0][RTW89_THAILAND][0][98] = 127, [0][1][2][0][RTW89_FCC][1][100] = -2, [0][1][2][0][RTW89_FCC][2][100] = 127, [0][1][2][0][RTW89_ETSI][1][100] = 127, @@ -41922,6 +43082,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][100] = 127, [0][1][2][0][RTW89_MKK][0][100] = 127, [0][1][2][0][RTW89_IC][1][100] = -2, + [0][1][2][0][RTW89_IC][2][100] = 127, [0][1][2][0][RTW89_KCC][1][100] = 20, [0][1][2][0][RTW89_KCC][0][100] = 127, [0][1][2][0][RTW89_ACMA][1][100] = 127, @@ -41931,6 +43092,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][100] = 127, [0][1][2][0][RTW89_UK][1][100] = 127, [0][1][2][0][RTW89_UK][0][100] = 127, + [0][1][2][0][RTW89_THAILAND][1][100] = 127, + [0][1][2][0][RTW89_THAILAND][0][100] = 127, [0][1][2][0][RTW89_FCC][1][102] = -2, [0][1][2][0][RTW89_FCC][2][102] = 127, [0][1][2][0][RTW89_ETSI][1][102] = 127, @@ -41938,6 +43101,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][102] = 127, [0][1][2][0][RTW89_MKK][0][102] = 127, [0][1][2][0][RTW89_IC][1][102] = -2, + [0][1][2][0][RTW89_IC][2][102] = 127, [0][1][2][0][RTW89_KCC][1][102] = 20, [0][1][2][0][RTW89_KCC][0][102] = 127, [0][1][2][0][RTW89_ACMA][1][102] = 127, @@ -41947,6 +43111,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][102] = 127, [0][1][2][0][RTW89_UK][1][102] = 127, [0][1][2][0][RTW89_UK][0][102] = 127, + [0][1][2][0][RTW89_THAILAND][1][102] = 127, + [0][1][2][0][RTW89_THAILAND][0][102] = 127, [0][1][2][0][RTW89_FCC][1][104] = -2, [0][1][2][0][RTW89_FCC][2][104] = 127, [0][1][2][0][RTW89_ETSI][1][104] = 127, @@ -41954,6 +43120,7 
@@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][104] = 127, [0][1][2][0][RTW89_MKK][0][104] = 127, [0][1][2][0][RTW89_IC][1][104] = -2, + [0][1][2][0][RTW89_IC][2][104] = 127, [0][1][2][0][RTW89_KCC][1][104] = 20, [0][1][2][0][RTW89_KCC][0][104] = 127, [0][1][2][0][RTW89_ACMA][1][104] = 127, @@ -41963,6 +43130,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][104] = 127, [0][1][2][0][RTW89_UK][1][104] = 127, [0][1][2][0][RTW89_UK][0][104] = 127, + [0][1][2][0][RTW89_THAILAND][1][104] = 127, + [0][1][2][0][RTW89_THAILAND][0][104] = 127, [0][1][2][0][RTW89_FCC][1][105] = -2, [0][1][2][0][RTW89_FCC][2][105] = 127, [0][1][2][0][RTW89_ETSI][1][105] = 127, @@ -41970,6 +43139,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][105] = 127, [0][1][2][0][RTW89_MKK][0][105] = 127, [0][1][2][0][RTW89_IC][1][105] = -2, + [0][1][2][0][RTW89_IC][2][105] = 127, [0][1][2][0][RTW89_KCC][1][105] = 20, [0][1][2][0][RTW89_KCC][0][105] = 127, [0][1][2][0][RTW89_ACMA][1][105] = 127, @@ -41979,6 +43149,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][105] = 127, [0][1][2][0][RTW89_UK][1][105] = 127, [0][1][2][0][RTW89_UK][0][105] = 127, + [0][1][2][0][RTW89_THAILAND][1][105] = 127, + [0][1][2][0][RTW89_THAILAND][0][105] = 127, [0][1][2][0][RTW89_FCC][1][107] = 1, [0][1][2][0][RTW89_FCC][2][107] = 127, [0][1][2][0][RTW89_ETSI][1][107] = 127, @@ -41986,6 +43158,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][107] = 127, [0][1][2][0][RTW89_MKK][0][107] = 127, [0][1][2][0][RTW89_IC][1][107] = 1, + [0][1][2][0][RTW89_IC][2][107] = 127, [0][1][2][0][RTW89_KCC][1][107] = 20, [0][1][2][0][RTW89_KCC][0][107] = 127, [0][1][2][0][RTW89_ACMA][1][107] = 127, @@ -41995,6 +43168,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][107] = 127, [0][1][2][0][RTW89_UK][1][107] = 127, [0][1][2][0][RTW89_UK][0][107] = 127, + [0][1][2][0][RTW89_THAILAND][1][107] = 127, + [0][1][2][0][RTW89_THAILAND][0][107] = 127, [0][1][2][0][RTW89_FCC][1][109] = 1, [0][1][2][0][RTW89_FCC][2][109] = 127, [0][1][2][0][RTW89_ETSI][1][109] = 127, @@ -42002,6 +43177,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][109] = 127, [0][1][2][0][RTW89_MKK][0][109] = 127, [0][1][2][0][RTW89_IC][1][109] = 1, + [0][1][2][0][RTW89_IC][2][109] = 127, [0][1][2][0][RTW89_KCC][1][109] = 20, [0][1][2][0][RTW89_KCC][0][109] = 127, [0][1][2][0][RTW89_ACMA][1][109] = 127, @@ -42011,6 +43187,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][109] = 127, [0][1][2][0][RTW89_UK][1][109] = 127, [0][1][2][0][RTW89_UK][0][109] = 127, + [0][1][2][0][RTW89_THAILAND][1][109] = 127, + [0][1][2][0][RTW89_THAILAND][0][109] = 127, [0][1][2][0][RTW89_FCC][1][111] = 127, [0][1][2][0][RTW89_FCC][2][111] = 127, [0][1][2][0][RTW89_ETSI][1][111] = 127, @@ -42018,6 +43196,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][111] = 127, [0][1][2][0][RTW89_MKK][0][111] = 127, [0][1][2][0][RTW89_IC][1][111] = 127, + [0][1][2][0][RTW89_IC][2][111] = 127, [0][1][2][0][RTW89_KCC][1][111] = 127, [0][1][2][0][RTW89_KCC][0][111] = 127, [0][1][2][0][RTW89_ACMA][1][111] = 127, @@ -42027,6 +43206,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][0][RTW89_QATAR][0][111] = 127, [0][1][2][0][RTW89_UK][1][111] = 127, [0][1][2][0][RTW89_UK][0][111] = 127, + [0][1][2][0][RTW89_THAILAND][1][111] = 127, + [0][1][2][0][RTW89_THAILAND][0][111] = 127, [0][1][2][0][RTW89_FCC][1][113] = 127, [0][1][2][0][RTW89_FCC][2][113] = 127, [0][1][2][0][RTW89_ETSI][1][113] = 127, @@ -42034,6 +43215,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][113] = 127, [0][1][2][0][RTW89_MKK][0][113] = 127, [0][1][2][0][RTW89_IC][1][113] = 127, + [0][1][2][0][RTW89_IC][2][113] = 127, [0][1][2][0][RTW89_KCC][1][113] = 127, [0][1][2][0][RTW89_KCC][0][113] = 127, [0][1][2][0][RTW89_ACMA][1][113] = 127, @@ -42043,6 +43225,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][113] = 127, [0][1][2][0][RTW89_UK][1][113] = 127, [0][1][2][0][RTW89_UK][0][113] = 127, + [0][1][2][0][RTW89_THAILAND][1][113] = 127, + [0][1][2][0][RTW89_THAILAND][0][113] = 127, [0][1][2][0][RTW89_FCC][1][115] = 127, [0][1][2][0][RTW89_FCC][2][115] = 127, [0][1][2][0][RTW89_ETSI][1][115] = 127, @@ -42050,6 +43234,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][115] = 127, [0][1][2][0][RTW89_MKK][0][115] = 127, [0][1][2][0][RTW89_IC][1][115] = 127, + [0][1][2][0][RTW89_IC][2][115] = 127, [0][1][2][0][RTW89_KCC][1][115] = 127, [0][1][2][0][RTW89_KCC][0][115] = 127, [0][1][2][0][RTW89_ACMA][1][115] = 127, @@ -42059,6 +43244,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][115] = 127, [0][1][2][0][RTW89_UK][1][115] = 127, [0][1][2][0][RTW89_UK][0][115] = 127, + [0][1][2][0][RTW89_THAILAND][1][115] = 127, + [0][1][2][0][RTW89_THAILAND][0][115] = 127, [0][1][2][0][RTW89_FCC][1][117] = 127, [0][1][2][0][RTW89_FCC][2][117] = 127, [0][1][2][0][RTW89_ETSI][1][117] = 127, @@ -42066,6 +43253,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][117] = 127, [0][1][2][0][RTW89_MKK][0][117] = 127, [0][1][2][0][RTW89_IC][1][117] = 127, + [0][1][2][0][RTW89_IC][2][117] = 127, [0][1][2][0][RTW89_KCC][1][117] = 127, [0][1][2][0][RTW89_KCC][0][117] = 127, [0][1][2][0][RTW89_ACMA][1][117] = 127, @@ -42075,6 +43263,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][117] = 127, [0][1][2][0][RTW89_UK][1][117] = 127, [0][1][2][0][RTW89_UK][0][117] = 127, + [0][1][2][0][RTW89_THAILAND][1][117] = 127, + [0][1][2][0][RTW89_THAILAND][0][117] = 127, [0][1][2][0][RTW89_FCC][1][119] = 127, [0][1][2][0][RTW89_FCC][2][119] = 127, [0][1][2][0][RTW89_ETSI][1][119] = 127, @@ -42082,6 +43272,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][119] = 127, [0][1][2][0][RTW89_MKK][0][119] = 127, [0][1][2][0][RTW89_IC][1][119] = 127, + [0][1][2][0][RTW89_IC][2][119] = 127, [0][1][2][0][RTW89_KCC][1][119] = 127, [0][1][2][0][RTW89_KCC][0][119] = 127, [0][1][2][0][RTW89_ACMA][1][119] = 127, @@ -42091,6 +43282,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][119] = 127, [0][1][2][0][RTW89_UK][1][119] = 127, [0][1][2][0][RTW89_UK][0][119] = 127, + [0][1][2][0][RTW89_THAILAND][1][119] = 127, + [0][1][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][1][RTW89_FCC][1][0] = -2, [0][1][2][1][RTW89_FCC][2][0] = 54, [0][1][2][1][RTW89_ETSI][1][0] = 42, @@ -42098,6 +43291,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][1][RTW89_MKK][1][0] = 56, [0][1][2][1][RTW89_MKK][0][0] = 16, [0][1][2][1][RTW89_IC][1][0] = -2, + [0][1][2][1][RTW89_IC][2][0] = 54, [0][1][2][1][RTW89_KCC][1][0] = 12, [0][1][2][1][RTW89_KCC][0][0] = 10, [0][1][2][1][RTW89_ACMA][1][0] = 42, @@ -42107,6 +43301,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][0] = 6, [0][1][2][1][RTW89_UK][1][0] = 42, [0][1][2][1][RTW89_UK][0][0] = 6, + [0][1][2][1][RTW89_THAILAND][1][0] = 44, + [0][1][2][1][RTW89_THAILAND][0][0] = -2, [0][1][2][1][RTW89_FCC][1][2] = -4, [0][1][2][1][RTW89_FCC][2][2] = 54, [0][1][2][1][RTW89_ETSI][1][2] = 42, @@ -42114,6 +43310,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][2] = 54, [0][1][2][1][RTW89_MKK][0][2] = 16, [0][1][2][1][RTW89_IC][1][2] = -4, + [0][1][2][1][RTW89_IC][2][2] = 54, [0][1][2][1][RTW89_KCC][1][2] = 12, [0][1][2][1][RTW89_KCC][0][2] = 12, [0][1][2][1][RTW89_ACMA][1][2] = 42, @@ -42123,6 +43320,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][2] = 6, [0][1][2][1][RTW89_UK][1][2] = 42, [0][1][2][1][RTW89_UK][0][2] = 6, + [0][1][2][1][RTW89_THAILAND][1][2] = 44, + [0][1][2][1][RTW89_THAILAND][0][2] = -4, [0][1][2][1][RTW89_FCC][1][4] = -4, [0][1][2][1][RTW89_FCC][2][4] = 54, [0][1][2][1][RTW89_ETSI][1][4] = 42, @@ -42130,6 +43329,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][4] = 54, [0][1][2][1][RTW89_MKK][0][4] = 16, [0][1][2][1][RTW89_IC][1][4] = -4, + [0][1][2][1][RTW89_IC][2][4] = 54, [0][1][2][1][RTW89_KCC][1][4] = 12, [0][1][2][1][RTW89_KCC][0][4] = 12, [0][1][2][1][RTW89_ACMA][1][4] = 42, @@ -42139,6 +43339,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][4] = 6, [0][1][2][1][RTW89_UK][1][4] = 42, [0][1][2][1][RTW89_UK][0][4] = 6, + [0][1][2][1][RTW89_THAILAND][1][4] = 44, + [0][1][2][1][RTW89_THAILAND][0][4] = -4, [0][1][2][1][RTW89_FCC][1][6] = -4, [0][1][2][1][RTW89_FCC][2][6] = 54, [0][1][2][1][RTW89_ETSI][1][6] = 42, @@ -42146,6 +43348,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][6] = 54, [0][1][2][1][RTW89_MKK][0][6] = 16, [0][1][2][1][RTW89_IC][1][6] = -4, + [0][1][2][1][RTW89_IC][2][6] = 54, [0][1][2][1][RTW89_KCC][1][6] = 12, [0][1][2][1][RTW89_KCC][0][6] = 12, [0][1][2][1][RTW89_ACMA][1][6] = 42, @@ -42155,6 +43358,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][6] = 6, [0][1][2][1][RTW89_UK][1][6] = 42, [0][1][2][1][RTW89_UK][0][6] = 6, + [0][1][2][1][RTW89_THAILAND][1][6] = 44, + [0][1][2][1][RTW89_THAILAND][0][6] = -4, [0][1][2][1][RTW89_FCC][1][8] = -4, [0][1][2][1][RTW89_FCC][2][8] = 54, [0][1][2][1][RTW89_ETSI][1][8] = 42, @@ -42162,6 +43367,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][8] = 54, [0][1][2][1][RTW89_MKK][0][8] = 16, [0][1][2][1][RTW89_IC][1][8] = -4, + [0][1][2][1][RTW89_IC][2][8] = 54, [0][1][2][1][RTW89_KCC][1][8] = 12, [0][1][2][1][RTW89_KCC][0][8] = 12, [0][1][2][1][RTW89_ACMA][1][8] = 42, @@ -42171,6 +43377,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][8] = 6, [0][1][2][1][RTW89_UK][1][8] = 42, [0][1][2][1][RTW89_UK][0][8] = 6, + [0][1][2][1][RTW89_THAILAND][1][8] = 44, + [0][1][2][1][RTW89_THAILAND][0][8] = -4, [0][1][2][1][RTW89_FCC][1][10] = -4, [0][1][2][1][RTW89_FCC][2][10] = 54, 
[0][1][2][1][RTW89_ETSI][1][10] = 42, @@ -42178,6 +43386,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][10] = 54, [0][1][2][1][RTW89_MKK][0][10] = 16, [0][1][2][1][RTW89_IC][1][10] = -4, + [0][1][2][1][RTW89_IC][2][10] = 54, [0][1][2][1][RTW89_KCC][1][10] = 12, [0][1][2][1][RTW89_KCC][0][10] = 12, [0][1][2][1][RTW89_ACMA][1][10] = 42, @@ -42187,6 +43396,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][10] = 6, [0][1][2][1][RTW89_UK][1][10] = 42, [0][1][2][1][RTW89_UK][0][10] = 6, + [0][1][2][1][RTW89_THAILAND][1][10] = 44, + [0][1][2][1][RTW89_THAILAND][0][10] = -4, [0][1][2][1][RTW89_FCC][1][12] = -4, [0][1][2][1][RTW89_FCC][2][12] = 54, [0][1][2][1][RTW89_ETSI][1][12] = 42, @@ -42194,6 +43405,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][12] = 54, [0][1][2][1][RTW89_MKK][0][12] = 16, [0][1][2][1][RTW89_IC][1][12] = -4, + [0][1][2][1][RTW89_IC][2][12] = 54, [0][1][2][1][RTW89_KCC][1][12] = 12, [0][1][2][1][RTW89_KCC][0][12] = 12, [0][1][2][1][RTW89_ACMA][1][12] = 42, @@ -42203,6 +43415,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][12] = 6, [0][1][2][1][RTW89_UK][1][12] = 42, [0][1][2][1][RTW89_UK][0][12] = 6, + [0][1][2][1][RTW89_THAILAND][1][12] = 44, + [0][1][2][1][RTW89_THAILAND][0][12] = -4, [0][1][2][1][RTW89_FCC][1][14] = -4, [0][1][2][1][RTW89_FCC][2][14] = 54, [0][1][2][1][RTW89_ETSI][1][14] = 42, @@ -42210,6 +43424,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][14] = 54, [0][1][2][1][RTW89_MKK][0][14] = 16, [0][1][2][1][RTW89_IC][1][14] = -4, + [0][1][2][1][RTW89_IC][2][14] = 54, [0][1][2][1][RTW89_KCC][1][14] = 12, [0][1][2][1][RTW89_KCC][0][14] = 12, [0][1][2][1][RTW89_ACMA][1][14] = 42, @@ -42219,6 +43434,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][14] = 6, [0][1][2][1][RTW89_UK][1][14] = 42, [0][1][2][1][RTW89_UK][0][14] = 6, + [0][1][2][1][RTW89_THAILAND][1][14] = 44, + [0][1][2][1][RTW89_THAILAND][0][14] = -4, [0][1][2][1][RTW89_FCC][1][15] = -4, [0][1][2][1][RTW89_FCC][2][15] = 54, [0][1][2][1][RTW89_ETSI][1][15] = 42, @@ -42226,6 +43443,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][15] = 54, [0][1][2][1][RTW89_MKK][0][15] = 16, [0][1][2][1][RTW89_IC][1][15] = -4, + [0][1][2][1][RTW89_IC][2][15] = 54, [0][1][2][1][RTW89_KCC][1][15] = 12, [0][1][2][1][RTW89_KCC][0][15] = 12, [0][1][2][1][RTW89_ACMA][1][15] = 42, @@ -42235,6 +43453,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][15] = 6, [0][1][2][1][RTW89_UK][1][15] = 42, [0][1][2][1][RTW89_UK][0][15] = 6, + [0][1][2][1][RTW89_THAILAND][1][15] = 44, + [0][1][2][1][RTW89_THAILAND][0][15] = -4, [0][1][2][1][RTW89_FCC][1][17] = -4, [0][1][2][1][RTW89_FCC][2][17] = 54, [0][1][2][1][RTW89_ETSI][1][17] = 42, @@ -42242,6 +43462,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][17] = 54, [0][1][2][1][RTW89_MKK][0][17] = 16, [0][1][2][1][RTW89_IC][1][17] = -4, + [0][1][2][1][RTW89_IC][2][17] = 54, [0][1][2][1][RTW89_KCC][1][17] = 12, [0][1][2][1][RTW89_KCC][0][17] = 12, [0][1][2][1][RTW89_ACMA][1][17] = 42, @@ -42251,6 +43472,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][17] = 6, [0][1][2][1][RTW89_UK][1][17] = 42, 
[0][1][2][1][RTW89_UK][0][17] = 6, + [0][1][2][1][RTW89_THAILAND][1][17] = 44, + [0][1][2][1][RTW89_THAILAND][0][17] = -4, [0][1][2][1][RTW89_FCC][1][19] = -4, [0][1][2][1][RTW89_FCC][2][19] = 54, [0][1][2][1][RTW89_ETSI][1][19] = 42, @@ -42258,6 +43481,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][19] = 54, [0][1][2][1][RTW89_MKK][0][19] = 16, [0][1][2][1][RTW89_IC][1][19] = -4, + [0][1][2][1][RTW89_IC][2][19] = 54, [0][1][2][1][RTW89_KCC][1][19] = 12, [0][1][2][1][RTW89_KCC][0][19] = 12, [0][1][2][1][RTW89_ACMA][1][19] = 42, @@ -42267,6 +43491,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][19] = 6, [0][1][2][1][RTW89_UK][1][19] = 42, [0][1][2][1][RTW89_UK][0][19] = 6, + [0][1][2][1][RTW89_THAILAND][1][19] = 44, + [0][1][2][1][RTW89_THAILAND][0][19] = -4, [0][1][2][1][RTW89_FCC][1][21] = -4, [0][1][2][1][RTW89_FCC][2][21] = 54, [0][1][2][1][RTW89_ETSI][1][21] = 42, @@ -42274,6 +43500,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][21] = 54, [0][1][2][1][RTW89_MKK][0][21] = 16, [0][1][2][1][RTW89_IC][1][21] = -4, + [0][1][2][1][RTW89_IC][2][21] = 54, [0][1][2][1][RTW89_KCC][1][21] = 12, [0][1][2][1][RTW89_KCC][0][21] = 12, [0][1][2][1][RTW89_ACMA][1][21] = 42, @@ -42283,6 +43510,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][21] = 6, [0][1][2][1][RTW89_UK][1][21] = 42, [0][1][2][1][RTW89_UK][0][21] = 6, + [0][1][2][1][RTW89_THAILAND][1][21] = 44, + [0][1][2][1][RTW89_THAILAND][0][21] = -4, [0][1][2][1][RTW89_FCC][1][23] = -4, [0][1][2][1][RTW89_FCC][2][23] = 68, [0][1][2][1][RTW89_ETSI][1][23] = 42, @@ -42290,6 +43519,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][23] = 54, [0][1][2][1][RTW89_MKK][0][23] = 16, [0][1][2][1][RTW89_IC][1][23] = -4, + [0][1][2][1][RTW89_IC][2][23] = 68, [0][1][2][1][RTW89_KCC][1][23] = 12, [0][1][2][1][RTW89_KCC][0][23] = 10, [0][1][2][1][RTW89_ACMA][1][23] = 42, @@ -42299,6 +43529,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][23] = 6, [0][1][2][1][RTW89_UK][1][23] = 42, [0][1][2][1][RTW89_UK][0][23] = 6, + [0][1][2][1][RTW89_THAILAND][1][23] = 44, + [0][1][2][1][RTW89_THAILAND][0][23] = -4, [0][1][2][1][RTW89_FCC][1][25] = -4, [0][1][2][1][RTW89_FCC][2][25] = 68, [0][1][2][1][RTW89_ETSI][1][25] = 42, @@ -42306,6 +43538,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][25] = 54, [0][1][2][1][RTW89_MKK][0][25] = 16, [0][1][2][1][RTW89_IC][1][25] = -4, + [0][1][2][1][RTW89_IC][2][25] = 68, [0][1][2][1][RTW89_KCC][1][25] = 12, [0][1][2][1][RTW89_KCC][0][25] = 14, [0][1][2][1][RTW89_ACMA][1][25] = 42, @@ -42315,6 +43548,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][25] = 6, [0][1][2][1][RTW89_UK][1][25] = 42, [0][1][2][1][RTW89_UK][0][25] = 6, + [0][1][2][1][RTW89_THAILAND][1][25] = 42, + [0][1][2][1][RTW89_THAILAND][0][25] = -4, [0][1][2][1][RTW89_FCC][1][27] = -4, [0][1][2][1][RTW89_FCC][2][27] = 68, [0][1][2][1][RTW89_ETSI][1][27] = 42, @@ -42322,6 +43557,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][27] = 54, [0][1][2][1][RTW89_MKK][0][27] = 16, [0][1][2][1][RTW89_IC][1][27] = -4, + [0][1][2][1][RTW89_IC][2][27] = 68, [0][1][2][1][RTW89_KCC][1][27] = 12, [0][1][2][1][RTW89_KCC][0][27] = 14, 
[0][1][2][1][RTW89_ACMA][1][27] = 42, @@ -42331,6 +43567,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][27] = 6, [0][1][2][1][RTW89_UK][1][27] = 42, [0][1][2][1][RTW89_UK][0][27] = 6, + [0][1][2][1][RTW89_THAILAND][1][27] = 42, + [0][1][2][1][RTW89_THAILAND][0][27] = -4, [0][1][2][1][RTW89_FCC][1][29] = -4, [0][1][2][1][RTW89_FCC][2][29] = 68, [0][1][2][1][RTW89_ETSI][1][29] = 42, @@ -42338,6 +43576,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][29] = 54, [0][1][2][1][RTW89_MKK][0][29] = 16, [0][1][2][1][RTW89_IC][1][29] = -4, + [0][1][2][1][RTW89_IC][2][29] = 68, [0][1][2][1][RTW89_KCC][1][29] = 12, [0][1][2][1][RTW89_KCC][0][29] = 14, [0][1][2][1][RTW89_ACMA][1][29] = 42, @@ -42347,6 +43586,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][29] = 6, [0][1][2][1][RTW89_UK][1][29] = 42, [0][1][2][1][RTW89_UK][0][29] = 6, + [0][1][2][1][RTW89_THAILAND][1][29] = 42, + [0][1][2][1][RTW89_THAILAND][0][29] = -4, [0][1][2][1][RTW89_FCC][1][30] = -4, [0][1][2][1][RTW89_FCC][2][30] = 68, [0][1][2][1][RTW89_ETSI][1][30] = 42, @@ -42354,6 +43595,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][30] = 54, [0][1][2][1][RTW89_MKK][0][30] = 16, [0][1][2][1][RTW89_IC][1][30] = -4, + [0][1][2][1][RTW89_IC][2][30] = 68, [0][1][2][1][RTW89_KCC][1][30] = 12, [0][1][2][1][RTW89_KCC][0][30] = 14, [0][1][2][1][RTW89_ACMA][1][30] = 42, @@ -42363,6 +43605,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][30] = 6, [0][1][2][1][RTW89_UK][1][30] = 42, [0][1][2][1][RTW89_UK][0][30] = 6, + [0][1][2][1][RTW89_THAILAND][1][30] = 42, + [0][1][2][1][RTW89_THAILAND][0][30] = -4, [0][1][2][1][RTW89_FCC][1][32] = -4, [0][1][2][1][RTW89_FCC][2][32] = 68, [0][1][2][1][RTW89_ETSI][1][32] = 42, @@ -42370,6 +43614,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][32] = 54, [0][1][2][1][RTW89_MKK][0][32] = 16, [0][1][2][1][RTW89_IC][1][32] = -4, + [0][1][2][1][RTW89_IC][2][32] = 68, [0][1][2][1][RTW89_KCC][1][32] = 12, [0][1][2][1][RTW89_KCC][0][32] = 14, [0][1][2][1][RTW89_ACMA][1][32] = 42, @@ -42379,6 +43624,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][32] = 6, [0][1][2][1][RTW89_UK][1][32] = 42, [0][1][2][1][RTW89_UK][0][32] = 6, + [0][1][2][1][RTW89_THAILAND][1][32] = 42, + [0][1][2][1][RTW89_THAILAND][0][32] = -4, [0][1][2][1][RTW89_FCC][1][34] = -4, [0][1][2][1][RTW89_FCC][2][34] = 68, [0][1][2][1][RTW89_ETSI][1][34] = 42, @@ -42386,6 +43633,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][34] = 54, [0][1][2][1][RTW89_MKK][0][34] = 16, [0][1][2][1][RTW89_IC][1][34] = -4, + [0][1][2][1][RTW89_IC][2][34] = 68, [0][1][2][1][RTW89_KCC][1][34] = 12, [0][1][2][1][RTW89_KCC][0][34] = 14, [0][1][2][1][RTW89_ACMA][1][34] = 42, @@ -42395,6 +43643,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][34] = 6, [0][1][2][1][RTW89_UK][1][34] = 42, [0][1][2][1][RTW89_UK][0][34] = 6, + [0][1][2][1][RTW89_THAILAND][1][34] = 42, + [0][1][2][1][RTW89_THAILAND][0][34] = -4, [0][1][2][1][RTW89_FCC][1][36] = -4, [0][1][2][1][RTW89_FCC][2][36] = 68, [0][1][2][1][RTW89_ETSI][1][36] = 42, @@ -42402,6 +43652,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][36] 
= 54, [0][1][2][1][RTW89_MKK][0][36] = 16, [0][1][2][1][RTW89_IC][1][36] = -4, + [0][1][2][1][RTW89_IC][2][36] = 68, [0][1][2][1][RTW89_KCC][1][36] = 12, [0][1][2][1][RTW89_KCC][0][36] = 14, [0][1][2][1][RTW89_ACMA][1][36] = 42, @@ -42411,6 +43662,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][36] = 6, [0][1][2][1][RTW89_UK][1][36] = 42, [0][1][2][1][RTW89_UK][0][36] = 6, + [0][1][2][1][RTW89_THAILAND][1][36] = 42, + [0][1][2][1][RTW89_THAILAND][0][36] = -4, [0][1][2][1][RTW89_FCC][1][38] = -4, [0][1][2][1][RTW89_FCC][2][38] = 68, [0][1][2][1][RTW89_ETSI][1][38] = 42, @@ -42418,6 +43671,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][38] = 54, [0][1][2][1][RTW89_MKK][0][38] = 16, [0][1][2][1][RTW89_IC][1][38] = -4, + [0][1][2][1][RTW89_IC][2][38] = 68, [0][1][2][1][RTW89_KCC][1][38] = 12, [0][1][2][1][RTW89_KCC][0][38] = 14, [0][1][2][1][RTW89_ACMA][1][38] = 42, @@ -42427,6 +43681,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][38] = 6, [0][1][2][1][RTW89_UK][1][38] = 42, [0][1][2][1][RTW89_UK][0][38] = 6, + [0][1][2][1][RTW89_THAILAND][1][38] = 42, + [0][1][2][1][RTW89_THAILAND][0][38] = -4, [0][1][2][1][RTW89_FCC][1][40] = -4, [0][1][2][1][RTW89_FCC][2][40] = 68, [0][1][2][1][RTW89_ETSI][1][40] = 42, @@ -42434,6 +43690,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][40] = 54, [0][1][2][1][RTW89_MKK][0][40] = 16, [0][1][2][1][RTW89_IC][1][40] = -4, + [0][1][2][1][RTW89_IC][2][40] = 68, [0][1][2][1][RTW89_KCC][1][40] = 12, [0][1][2][1][RTW89_KCC][0][40] = 14, [0][1][2][1][RTW89_ACMA][1][40] = 42, @@ -42443,6 +43700,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][40] = 6, [0][1][2][1][RTW89_UK][1][40] = 42, [0][1][2][1][RTW89_UK][0][40] = 6, + [0][1][2][1][RTW89_THAILAND][1][40] = 42, + [0][1][2][1][RTW89_THAILAND][0][40] = -4, [0][1][2][1][RTW89_FCC][1][42] = -4, [0][1][2][1][RTW89_FCC][2][42] = 68, [0][1][2][1][RTW89_ETSI][1][42] = 42, @@ -42450,6 +43709,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][42] = 54, [0][1][2][1][RTW89_MKK][0][42] = 16, [0][1][2][1][RTW89_IC][1][42] = -4, + [0][1][2][1][RTW89_IC][2][42] = 68, [0][1][2][1][RTW89_KCC][1][42] = 12, [0][1][2][1][RTW89_KCC][0][42] = 14, [0][1][2][1][RTW89_ACMA][1][42] = 42, @@ -42459,6 +43719,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][42] = 6, [0][1][2][1][RTW89_UK][1][42] = 42, [0][1][2][1][RTW89_UK][0][42] = 6, + [0][1][2][1][RTW89_THAILAND][1][42] = 42, + [0][1][2][1][RTW89_THAILAND][0][42] = -4, [0][1][2][1][RTW89_FCC][1][44] = -2, [0][1][2][1][RTW89_FCC][2][44] = 68, [0][1][2][1][RTW89_ETSI][1][44] = 42, @@ -42466,6 +43728,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][44] = 34, [0][1][2][1][RTW89_MKK][0][44] = 16, [0][1][2][1][RTW89_IC][1][44] = -2, + [0][1][2][1][RTW89_IC][2][44] = 68, [0][1][2][1][RTW89_KCC][1][44] = 12, [0][1][2][1][RTW89_KCC][0][44] = 12, [0][1][2][1][RTW89_ACMA][1][44] = 42, @@ -42475,6 +43738,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][44] = 6, [0][1][2][1][RTW89_UK][1][44] = 42, [0][1][2][1][RTW89_UK][0][44] = 6, + [0][1][2][1][RTW89_THAILAND][1][44] = 42, + [0][1][2][1][RTW89_THAILAND][0][44] = -2, [0][1][2][1][RTW89_FCC][1][45] = -2, 
[0][1][2][1][RTW89_FCC][2][45] = 127, [0][1][2][1][RTW89_ETSI][1][45] = 127, @@ -42482,6 +43747,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][45] = 127, [0][1][2][1][RTW89_MKK][0][45] = 127, [0][1][2][1][RTW89_IC][1][45] = -2, + [0][1][2][1][RTW89_IC][2][45] = 70, [0][1][2][1][RTW89_KCC][1][45] = 12, [0][1][2][1][RTW89_KCC][0][45] = 127, [0][1][2][1][RTW89_ACMA][1][45] = 127, @@ -42491,6 +43757,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][45] = 127, [0][1][2][1][RTW89_UK][1][45] = 127, [0][1][2][1][RTW89_UK][0][45] = 127, + [0][1][2][1][RTW89_THAILAND][1][45] = 127, + [0][1][2][1][RTW89_THAILAND][0][45] = 127, [0][1][2][1][RTW89_FCC][1][47] = -2, [0][1][2][1][RTW89_FCC][2][47] = 127, [0][1][2][1][RTW89_ETSI][1][47] = 127, @@ -42498,6 +43766,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][47] = 127, [0][1][2][1][RTW89_MKK][0][47] = 127, [0][1][2][1][RTW89_IC][1][47] = -2, + [0][1][2][1][RTW89_IC][2][47] = 68, [0][1][2][1][RTW89_KCC][1][47] = 12, [0][1][2][1][RTW89_KCC][0][47] = 127, [0][1][2][1][RTW89_ACMA][1][47] = 127, @@ -42507,6 +43776,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][47] = 127, [0][1][2][1][RTW89_UK][1][47] = 127, [0][1][2][1][RTW89_UK][0][47] = 127, + [0][1][2][1][RTW89_THAILAND][1][47] = 127, + [0][1][2][1][RTW89_THAILAND][0][47] = 127, [0][1][2][1][RTW89_FCC][1][49] = -2, [0][1][2][1][RTW89_FCC][2][49] = 127, [0][1][2][1][RTW89_ETSI][1][49] = 127, @@ -42514,6 +43785,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][49] = 127, [0][1][2][1][RTW89_MKK][0][49] = 127, [0][1][2][1][RTW89_IC][1][49] = -2, + [0][1][2][1][RTW89_IC][2][49] = 68, [0][1][2][1][RTW89_KCC][1][49] = 12, [0][1][2][1][RTW89_KCC][0][49] = 127, [0][1][2][1][RTW89_ACMA][1][49] = 127, @@ -42523,6 +43795,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][49] = 127, [0][1][2][1][RTW89_UK][1][49] = 127, [0][1][2][1][RTW89_UK][0][49] = 127, + [0][1][2][1][RTW89_THAILAND][1][49] = 127, + [0][1][2][1][RTW89_THAILAND][0][49] = 127, [0][1][2][1][RTW89_FCC][1][51] = -2, [0][1][2][1][RTW89_FCC][2][51] = 127, [0][1][2][1][RTW89_ETSI][1][51] = 127, @@ -42530,6 +43804,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][51] = 127, [0][1][2][1][RTW89_MKK][0][51] = 127, [0][1][2][1][RTW89_IC][1][51] = -2, + [0][1][2][1][RTW89_IC][2][51] = 68, [0][1][2][1][RTW89_KCC][1][51] = 12, [0][1][2][1][RTW89_KCC][0][51] = 127, [0][1][2][1][RTW89_ACMA][1][51] = 127, @@ -42539,6 +43814,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][51] = 127, [0][1][2][1][RTW89_UK][1][51] = 127, [0][1][2][1][RTW89_UK][0][51] = 127, + [0][1][2][1][RTW89_THAILAND][1][51] = 127, + [0][1][2][1][RTW89_THAILAND][0][51] = 127, [0][1][2][1][RTW89_FCC][1][53] = -2, [0][1][2][1][RTW89_FCC][2][53] = 127, [0][1][2][1][RTW89_ETSI][1][53] = 127, @@ -42546,6 +43823,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][53] = 127, [0][1][2][1][RTW89_MKK][0][53] = 127, [0][1][2][1][RTW89_IC][1][53] = -2, + [0][1][2][1][RTW89_IC][2][53] = 68, [0][1][2][1][RTW89_KCC][1][53] = 12, [0][1][2][1][RTW89_KCC][0][53] = 127, [0][1][2][1][RTW89_ACMA][1][53] = 127, @@ -42555,6 +43833,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][53] = 127, [0][1][2][1][RTW89_UK][1][53] = 127, [0][1][2][1][RTW89_UK][0][53] = 127, + [0][1][2][1][RTW89_THAILAND][1][53] = 127, + [0][1][2][1][RTW89_THAILAND][0][53] = 127, [0][1][2][1][RTW89_FCC][1][55] = -2, [0][1][2][1][RTW89_FCC][2][55] = 68, [0][1][2][1][RTW89_ETSI][1][55] = 127, @@ -42562,6 +43842,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][55] = 127, [0][1][2][1][RTW89_MKK][0][55] = 127, [0][1][2][1][RTW89_IC][1][55] = -2, + [0][1][2][1][RTW89_IC][2][55] = 68, [0][1][2][1][RTW89_KCC][1][55] = 12, [0][1][2][1][RTW89_KCC][0][55] = 127, [0][1][2][1][RTW89_ACMA][1][55] = 127, @@ -42571,6 +43852,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][55] = 127, [0][1][2][1][RTW89_UK][1][55] = 127, [0][1][2][1][RTW89_UK][0][55] = 127, + [0][1][2][1][RTW89_THAILAND][1][55] = 127, + [0][1][2][1][RTW89_THAILAND][0][55] = 127, [0][1][2][1][RTW89_FCC][1][57] = -2, [0][1][2][1][RTW89_FCC][2][57] = 68, [0][1][2][1][RTW89_ETSI][1][57] = 127, @@ -42578,6 +43861,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][57] = 127, [0][1][2][1][RTW89_MKK][0][57] = 127, [0][1][2][1][RTW89_IC][1][57] = -2, + [0][1][2][1][RTW89_IC][2][57] = 68, [0][1][2][1][RTW89_KCC][1][57] = 12, [0][1][2][1][RTW89_KCC][0][57] = 127, [0][1][2][1][RTW89_ACMA][1][57] = 127, @@ -42587,6 +43871,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][57] = 127, [0][1][2][1][RTW89_UK][1][57] = 127, [0][1][2][1][RTW89_UK][0][57] = 127, + [0][1][2][1][RTW89_THAILAND][1][57] = 127, + [0][1][2][1][RTW89_THAILAND][0][57] = 127, [0][1][2][1][RTW89_FCC][1][59] = -2, [0][1][2][1][RTW89_FCC][2][59] = 68, [0][1][2][1][RTW89_ETSI][1][59] = 127, @@ -42594,6 +43880,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][59] = 127, [0][1][2][1][RTW89_MKK][0][59] = 127, [0][1][2][1][RTW89_IC][1][59] = -2, + [0][1][2][1][RTW89_IC][2][59] = 68, [0][1][2][1][RTW89_KCC][1][59] = 12, [0][1][2][1][RTW89_KCC][0][59] = 127, [0][1][2][1][RTW89_ACMA][1][59] = 127, @@ -42603,6 +43890,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][59] = 127, [0][1][2][1][RTW89_UK][1][59] = 127, [0][1][2][1][RTW89_UK][0][59] = 127, + [0][1][2][1][RTW89_THAILAND][1][59] = 127, + [0][1][2][1][RTW89_THAILAND][0][59] = 127, [0][1][2][1][RTW89_FCC][1][60] = -2, [0][1][2][1][RTW89_FCC][2][60] = 68, [0][1][2][1][RTW89_ETSI][1][60] = 127, @@ -42610,6 +43899,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][60] = 127, [0][1][2][1][RTW89_MKK][0][60] = 127, [0][1][2][1][RTW89_IC][1][60] = -2, + [0][1][2][1][RTW89_IC][2][60] = 68, [0][1][2][1][RTW89_KCC][1][60] = 12, [0][1][2][1][RTW89_KCC][0][60] = 127, [0][1][2][1][RTW89_ACMA][1][60] = 127, @@ -42619,6 +43909,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][60] = 127, [0][1][2][1][RTW89_UK][1][60] = 127, [0][1][2][1][RTW89_UK][0][60] = 127, + [0][1][2][1][RTW89_THAILAND][1][60] = 127, + [0][1][2][1][RTW89_THAILAND][0][60] = 127, [0][1][2][1][RTW89_FCC][1][62] = -2, [0][1][2][1][RTW89_FCC][2][62] = 68, [0][1][2][1][RTW89_ETSI][1][62] = 127, @@ -42626,6 +43918,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][62] = 127, 
[0][1][2][1][RTW89_MKK][0][62] = 127, [0][1][2][1][RTW89_IC][1][62] = -2, + [0][1][2][1][RTW89_IC][2][62] = 68, [0][1][2][1][RTW89_KCC][1][62] = 12, [0][1][2][1][RTW89_KCC][0][62] = 127, [0][1][2][1][RTW89_ACMA][1][62] = 127, @@ -42635,6 +43928,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][62] = 127, [0][1][2][1][RTW89_UK][1][62] = 127, [0][1][2][1][RTW89_UK][0][62] = 127, + [0][1][2][1][RTW89_THAILAND][1][62] = 127, + [0][1][2][1][RTW89_THAILAND][0][62] = 127, [0][1][2][1][RTW89_FCC][1][64] = -2, [0][1][2][1][RTW89_FCC][2][64] = 68, [0][1][2][1][RTW89_ETSI][1][64] = 127, @@ -42642,6 +43937,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][64] = 127, [0][1][2][1][RTW89_MKK][0][64] = 127, [0][1][2][1][RTW89_IC][1][64] = -2, + [0][1][2][1][RTW89_IC][2][64] = 68, [0][1][2][1][RTW89_KCC][1][64] = 12, [0][1][2][1][RTW89_KCC][0][64] = 127, [0][1][2][1][RTW89_ACMA][1][64] = 127, @@ -42651,6 +43947,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][64] = 127, [0][1][2][1][RTW89_UK][1][64] = 127, [0][1][2][1][RTW89_UK][0][64] = 127, + [0][1][2][1][RTW89_THAILAND][1][64] = 127, + [0][1][2][1][RTW89_THAILAND][0][64] = 127, [0][1][2][1][RTW89_FCC][1][66] = -2, [0][1][2][1][RTW89_FCC][2][66] = 68, [0][1][2][1][RTW89_ETSI][1][66] = 127, @@ -42658,6 +43956,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][66] = 127, [0][1][2][1][RTW89_MKK][0][66] = 127, [0][1][2][1][RTW89_IC][1][66] = -2, + [0][1][2][1][RTW89_IC][2][66] = 68, [0][1][2][1][RTW89_KCC][1][66] = 12, [0][1][2][1][RTW89_KCC][0][66] = 127, [0][1][2][1][RTW89_ACMA][1][66] = 127, @@ -42667,6 +43966,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][66] = 127, [0][1][2][1][RTW89_UK][1][66] = 127, [0][1][2][1][RTW89_UK][0][66] = 127, + [0][1][2][1][RTW89_THAILAND][1][66] = 127, + [0][1][2][1][RTW89_THAILAND][0][66] = 127, [0][1][2][1][RTW89_FCC][1][68] = -2, [0][1][2][1][RTW89_FCC][2][68] = 68, [0][1][2][1][RTW89_ETSI][1][68] = 127, @@ -42674,6 +43975,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][68] = 127, [0][1][2][1][RTW89_MKK][0][68] = 127, [0][1][2][1][RTW89_IC][1][68] = -2, + [0][1][2][1][RTW89_IC][2][68] = 68, [0][1][2][1][RTW89_KCC][1][68] = 12, [0][1][2][1][RTW89_KCC][0][68] = 127, [0][1][2][1][RTW89_ACMA][1][68] = 127, @@ -42683,6 +43985,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][68] = 127, [0][1][2][1][RTW89_UK][1][68] = 127, [0][1][2][1][RTW89_UK][0][68] = 127, + [0][1][2][1][RTW89_THAILAND][1][68] = 127, + [0][1][2][1][RTW89_THAILAND][0][68] = 127, [0][1][2][1][RTW89_FCC][1][70] = -2, [0][1][2][1][RTW89_FCC][2][70] = 68, [0][1][2][1][RTW89_ETSI][1][70] = 127, @@ -42690,6 +43994,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][70] = 127, [0][1][2][1][RTW89_MKK][0][70] = 127, [0][1][2][1][RTW89_IC][1][70] = -2, + [0][1][2][1][RTW89_IC][2][70] = 68, [0][1][2][1][RTW89_KCC][1][70] = 12, [0][1][2][1][RTW89_KCC][0][70] = 127, [0][1][2][1][RTW89_ACMA][1][70] = 127, @@ -42699,6 +44004,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][70] = 127, [0][1][2][1][RTW89_UK][1][70] = 127, [0][1][2][1][RTW89_UK][0][70] = 127, + [0][1][2][1][RTW89_THAILAND][1][70] = 127, + 
[0][1][2][1][RTW89_THAILAND][0][70] = 127, [0][1][2][1][RTW89_FCC][1][72] = -2, [0][1][2][1][RTW89_FCC][2][72] = 68, [0][1][2][1][RTW89_ETSI][1][72] = 127, @@ -42706,6 +44013,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][72] = 127, [0][1][2][1][RTW89_MKK][0][72] = 127, [0][1][2][1][RTW89_IC][1][72] = -2, + [0][1][2][1][RTW89_IC][2][72] = 68, [0][1][2][1][RTW89_KCC][1][72] = 12, [0][1][2][1][RTW89_KCC][0][72] = 127, [0][1][2][1][RTW89_ACMA][1][72] = 127, @@ -42715,6 +44023,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][72] = 127, [0][1][2][1][RTW89_UK][1][72] = 127, [0][1][2][1][RTW89_UK][0][72] = 127, + [0][1][2][1][RTW89_THAILAND][1][72] = 127, + [0][1][2][1][RTW89_THAILAND][0][72] = 127, [0][1][2][1][RTW89_FCC][1][74] = -2, [0][1][2][1][RTW89_FCC][2][74] = 68, [0][1][2][1][RTW89_ETSI][1][74] = 127, @@ -42722,6 +44032,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][74] = 127, [0][1][2][1][RTW89_MKK][0][74] = 127, [0][1][2][1][RTW89_IC][1][74] = -2, + [0][1][2][1][RTW89_IC][2][74] = 68, [0][1][2][1][RTW89_KCC][1][74] = 12, [0][1][2][1][RTW89_KCC][0][74] = 127, [0][1][2][1][RTW89_ACMA][1][74] = 127, @@ -42731,6 +44042,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][74] = 127, [0][1][2][1][RTW89_UK][1][74] = 127, [0][1][2][1][RTW89_UK][0][74] = 127, + [0][1][2][1][RTW89_THAILAND][1][74] = 127, + [0][1][2][1][RTW89_THAILAND][0][74] = 127, [0][1][2][1][RTW89_FCC][1][75] = -2, [0][1][2][1][RTW89_FCC][2][75] = 68, [0][1][2][1][RTW89_ETSI][1][75] = 127, @@ -42738,6 +44051,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][75] = 127, [0][1][2][1][RTW89_MKK][0][75] = 127, [0][1][2][1][RTW89_IC][1][75] = -2, + [0][1][2][1][RTW89_IC][2][75] = 68, [0][1][2][1][RTW89_KCC][1][75] = 12, [0][1][2][1][RTW89_KCC][0][75] = 127, [0][1][2][1][RTW89_ACMA][1][75] = 127, @@ -42747,6 +44061,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][75] = 127, [0][1][2][1][RTW89_UK][1][75] = 127, [0][1][2][1][RTW89_UK][0][75] = 127, + [0][1][2][1][RTW89_THAILAND][1][75] = 127, + [0][1][2][1][RTW89_THAILAND][0][75] = 127, [0][1][2][1][RTW89_FCC][1][77] = -2, [0][1][2][1][RTW89_FCC][2][77] = 68, [0][1][2][1][RTW89_ETSI][1][77] = 127, @@ -42754,6 +44070,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][77] = 127, [0][1][2][1][RTW89_MKK][0][77] = 127, [0][1][2][1][RTW89_IC][1][77] = -2, + [0][1][2][1][RTW89_IC][2][77] = 68, [0][1][2][1][RTW89_KCC][1][77] = 12, [0][1][2][1][RTW89_KCC][0][77] = 127, [0][1][2][1][RTW89_ACMA][1][77] = 127, @@ -42763,6 +44080,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][77] = 127, [0][1][2][1][RTW89_UK][1][77] = 127, [0][1][2][1][RTW89_UK][0][77] = 127, + [0][1][2][1][RTW89_THAILAND][1][77] = 127, + [0][1][2][1][RTW89_THAILAND][0][77] = 127, [0][1][2][1][RTW89_FCC][1][79] = -2, [0][1][2][1][RTW89_FCC][2][79] = 68, [0][1][2][1][RTW89_ETSI][1][79] = 127, @@ -42770,6 +44089,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][79] = 127, [0][1][2][1][RTW89_MKK][0][79] = 127, [0][1][2][1][RTW89_IC][1][79] = -2, + [0][1][2][1][RTW89_IC][2][79] = 68, [0][1][2][1][RTW89_KCC][1][79] = 12, [0][1][2][1][RTW89_KCC][0][79] = 127, [0][1][2][1][RTW89_ACMA][1][79] = 127, 
@@ -42779,6 +44099,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][79] = 127, [0][1][2][1][RTW89_UK][1][79] = 127, [0][1][2][1][RTW89_UK][0][79] = 127, + [0][1][2][1][RTW89_THAILAND][1][79] = 127, + [0][1][2][1][RTW89_THAILAND][0][79] = 127, [0][1][2][1][RTW89_FCC][1][81] = -2, [0][1][2][1][RTW89_FCC][2][81] = 68, [0][1][2][1][RTW89_ETSI][1][81] = 127, @@ -42786,6 +44108,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][81] = 127, [0][1][2][1][RTW89_MKK][0][81] = 127, [0][1][2][1][RTW89_IC][1][81] = -2, + [0][1][2][1][RTW89_IC][2][81] = 68, [0][1][2][1][RTW89_KCC][1][81] = 12, [0][1][2][1][RTW89_KCC][0][81] = 127, [0][1][2][1][RTW89_ACMA][1][81] = 127, @@ -42795,6 +44118,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][81] = 127, [0][1][2][1][RTW89_UK][1][81] = 127, [0][1][2][1][RTW89_UK][0][81] = 127, + [0][1][2][1][RTW89_THAILAND][1][81] = 127, + [0][1][2][1][RTW89_THAILAND][0][81] = 127, [0][1][2][1][RTW89_FCC][1][83] = -2, [0][1][2][1][RTW89_FCC][2][83] = 68, [0][1][2][1][RTW89_ETSI][1][83] = 127, @@ -42802,6 +44127,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][83] = 127, [0][1][2][1][RTW89_MKK][0][83] = 127, [0][1][2][1][RTW89_IC][1][83] = -2, + [0][1][2][1][RTW89_IC][2][83] = 68, [0][1][2][1][RTW89_KCC][1][83] = 20, [0][1][2][1][RTW89_KCC][0][83] = 127, [0][1][2][1][RTW89_ACMA][1][83] = 127, @@ -42811,6 +44137,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][83] = 127, [0][1][2][1][RTW89_UK][1][83] = 127, [0][1][2][1][RTW89_UK][0][83] = 127, + [0][1][2][1][RTW89_THAILAND][1][83] = 127, + [0][1][2][1][RTW89_THAILAND][0][83] = 127, [0][1][2][1][RTW89_FCC][1][85] = -2, [0][1][2][1][RTW89_FCC][2][85] = 68, [0][1][2][1][RTW89_ETSI][1][85] = 127, @@ -42818,6 +44146,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][85] = 127, [0][1][2][1][RTW89_MKK][0][85] = 127, [0][1][2][1][RTW89_IC][1][85] = -2, + [0][1][2][1][RTW89_IC][2][85] = 68, [0][1][2][1][RTW89_KCC][1][85] = 20, [0][1][2][1][RTW89_KCC][0][85] = 127, [0][1][2][1][RTW89_ACMA][1][85] = 127, @@ -42827,6 +44156,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][85] = 127, [0][1][2][1][RTW89_UK][1][85] = 127, [0][1][2][1][RTW89_UK][0][85] = 127, + [0][1][2][1][RTW89_THAILAND][1][85] = 127, + [0][1][2][1][RTW89_THAILAND][0][85] = 127, [0][1][2][1][RTW89_FCC][1][87] = -2, [0][1][2][1][RTW89_FCC][2][87] = 127, [0][1][2][1][RTW89_ETSI][1][87] = 127, @@ -42834,6 +44165,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][87] = 127, [0][1][2][1][RTW89_MKK][0][87] = 127, [0][1][2][1][RTW89_IC][1][87] = -2, + [0][1][2][1][RTW89_IC][2][87] = 127, [0][1][2][1][RTW89_KCC][1][87] = 20, [0][1][2][1][RTW89_KCC][0][87] = 127, [0][1][2][1][RTW89_ACMA][1][87] = 127, @@ -42843,6 +44175,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][87] = 127, [0][1][2][1][RTW89_UK][1][87] = 127, [0][1][2][1][RTW89_UK][0][87] = 127, + [0][1][2][1][RTW89_THAILAND][1][87] = 127, + [0][1][2][1][RTW89_THAILAND][0][87] = 127, [0][1][2][1][RTW89_FCC][1][89] = -2, [0][1][2][1][RTW89_FCC][2][89] = 127, [0][1][2][1][RTW89_ETSI][1][89] = 127, @@ -42850,6 +44184,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][1][RTW89_MKK][1][89] = 127, [0][1][2][1][RTW89_MKK][0][89] = 127, [0][1][2][1][RTW89_IC][1][89] = -2, + [0][1][2][1][RTW89_IC][2][89] = 127, [0][1][2][1][RTW89_KCC][1][89] = 20, [0][1][2][1][RTW89_KCC][0][89] = 127, [0][1][2][1][RTW89_ACMA][1][89] = 127, @@ -42859,6 +44194,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][89] = 127, [0][1][2][1][RTW89_UK][1][89] = 127, [0][1][2][1][RTW89_UK][0][89] = 127, + [0][1][2][1][RTW89_THAILAND][1][89] = 127, + [0][1][2][1][RTW89_THAILAND][0][89] = 127, [0][1][2][1][RTW89_FCC][1][90] = -2, [0][1][2][1][RTW89_FCC][2][90] = 127, [0][1][2][1][RTW89_ETSI][1][90] = 127, @@ -42866,6 +44203,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][90] = 127, [0][1][2][1][RTW89_MKK][0][90] = 127, [0][1][2][1][RTW89_IC][1][90] = -2, + [0][1][2][1][RTW89_IC][2][90] = 127, [0][1][2][1][RTW89_KCC][1][90] = 20, [0][1][2][1][RTW89_KCC][0][90] = 127, [0][1][2][1][RTW89_ACMA][1][90] = 127, @@ -42875,6 +44213,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][90] = 127, [0][1][2][1][RTW89_UK][1][90] = 127, [0][1][2][1][RTW89_UK][0][90] = 127, + [0][1][2][1][RTW89_THAILAND][1][90] = 127, + [0][1][2][1][RTW89_THAILAND][0][90] = 127, [0][1][2][1][RTW89_FCC][1][92] = -2, [0][1][2][1][RTW89_FCC][2][92] = 127, [0][1][2][1][RTW89_ETSI][1][92] = 127, @@ -42882,6 +44222,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][92] = 127, [0][1][2][1][RTW89_MKK][0][92] = 127, [0][1][2][1][RTW89_IC][1][92] = -2, + [0][1][2][1][RTW89_IC][2][92] = 127, [0][1][2][1][RTW89_KCC][1][92] = 20, [0][1][2][1][RTW89_KCC][0][92] = 127, [0][1][2][1][RTW89_ACMA][1][92] = 127, @@ -42891,6 +44232,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][92] = 127, [0][1][2][1][RTW89_UK][1][92] = 127, [0][1][2][1][RTW89_UK][0][92] = 127, + [0][1][2][1][RTW89_THAILAND][1][92] = 127, + [0][1][2][1][RTW89_THAILAND][0][92] = 127, [0][1][2][1][RTW89_FCC][1][94] = -2, [0][1][2][1][RTW89_FCC][2][94] = 127, [0][1][2][1][RTW89_ETSI][1][94] = 127, @@ -42898,6 +44241,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][94] = 127, [0][1][2][1][RTW89_MKK][0][94] = 127, [0][1][2][1][RTW89_IC][1][94] = -2, + [0][1][2][1][RTW89_IC][2][94] = 127, [0][1][2][1][RTW89_KCC][1][94] = 20, [0][1][2][1][RTW89_KCC][0][94] = 127, [0][1][2][1][RTW89_ACMA][1][94] = 127, @@ -42907,6 +44251,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][94] = 127, [0][1][2][1][RTW89_UK][1][94] = 127, [0][1][2][1][RTW89_UK][0][94] = 127, + [0][1][2][1][RTW89_THAILAND][1][94] = 127, + [0][1][2][1][RTW89_THAILAND][0][94] = 127, [0][1][2][1][RTW89_FCC][1][96] = -2, [0][1][2][1][RTW89_FCC][2][96] = 127, [0][1][2][1][RTW89_ETSI][1][96] = 127, @@ -42914,6 +44260,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][96] = 127, [0][1][2][1][RTW89_MKK][0][96] = 127, [0][1][2][1][RTW89_IC][1][96] = -2, + [0][1][2][1][RTW89_IC][2][96] = 127, [0][1][2][1][RTW89_KCC][1][96] = 20, [0][1][2][1][RTW89_KCC][0][96] = 127, [0][1][2][1][RTW89_ACMA][1][96] = 127, @@ -42923,6 +44270,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][96] = 127, [0][1][2][1][RTW89_UK][1][96] = 127, [0][1][2][1][RTW89_UK][0][96] = 127, + 
[0][1][2][1][RTW89_THAILAND][1][96] = 127, + [0][1][2][1][RTW89_THAILAND][0][96] = 127, [0][1][2][1][RTW89_FCC][1][98] = -2, [0][1][2][1][RTW89_FCC][2][98] = 127, [0][1][2][1][RTW89_ETSI][1][98] = 127, @@ -42930,6 +44279,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][98] = 127, [0][1][2][1][RTW89_MKK][0][98] = 127, [0][1][2][1][RTW89_IC][1][98] = -2, + [0][1][2][1][RTW89_IC][2][98] = 127, [0][1][2][1][RTW89_KCC][1][98] = 20, [0][1][2][1][RTW89_KCC][0][98] = 127, [0][1][2][1][RTW89_ACMA][1][98] = 127, @@ -42939,6 +44289,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][98] = 127, [0][1][2][1][RTW89_UK][1][98] = 127, [0][1][2][1][RTW89_UK][0][98] = 127, + [0][1][2][1][RTW89_THAILAND][1][98] = 127, + [0][1][2][1][RTW89_THAILAND][0][98] = 127, [0][1][2][1][RTW89_FCC][1][100] = -2, [0][1][2][1][RTW89_FCC][2][100] = 127, [0][1][2][1][RTW89_ETSI][1][100] = 127, @@ -42946,6 +44298,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][100] = 127, [0][1][2][1][RTW89_MKK][0][100] = 127, [0][1][2][1][RTW89_IC][1][100] = -2, + [0][1][2][1][RTW89_IC][2][100] = 127, [0][1][2][1][RTW89_KCC][1][100] = 20, [0][1][2][1][RTW89_KCC][0][100] = 127, [0][1][2][1][RTW89_ACMA][1][100] = 127, @@ -42955,6 +44308,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][100] = 127, [0][1][2][1][RTW89_UK][1][100] = 127, [0][1][2][1][RTW89_UK][0][100] = 127, + [0][1][2][1][RTW89_THAILAND][1][100] = 127, + [0][1][2][1][RTW89_THAILAND][0][100] = 127, [0][1][2][1][RTW89_FCC][1][102] = -2, [0][1][2][1][RTW89_FCC][2][102] = 127, [0][1][2][1][RTW89_ETSI][1][102] = 127, @@ -42962,6 +44317,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][102] = 127, [0][1][2][1][RTW89_MKK][0][102] = 127, [0][1][2][1][RTW89_IC][1][102] = -2, + [0][1][2][1][RTW89_IC][2][102] = 127, [0][1][2][1][RTW89_KCC][1][102] = 20, [0][1][2][1][RTW89_KCC][0][102] = 127, [0][1][2][1][RTW89_ACMA][1][102] = 127, @@ -42971,6 +44327,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][102] = 127, [0][1][2][1][RTW89_UK][1][102] = 127, [0][1][2][1][RTW89_UK][0][102] = 127, + [0][1][2][1][RTW89_THAILAND][1][102] = 127, + [0][1][2][1][RTW89_THAILAND][0][102] = 127, [0][1][2][1][RTW89_FCC][1][104] = -2, [0][1][2][1][RTW89_FCC][2][104] = 127, [0][1][2][1][RTW89_ETSI][1][104] = 127, @@ -42978,6 +44336,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][104] = 127, [0][1][2][1][RTW89_MKK][0][104] = 127, [0][1][2][1][RTW89_IC][1][104] = -2, + [0][1][2][1][RTW89_IC][2][104] = 127, [0][1][2][1][RTW89_KCC][1][104] = 20, [0][1][2][1][RTW89_KCC][0][104] = 127, [0][1][2][1][RTW89_ACMA][1][104] = 127, @@ -42987,6 +44346,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][104] = 127, [0][1][2][1][RTW89_UK][1][104] = 127, [0][1][2][1][RTW89_UK][0][104] = 127, + [0][1][2][1][RTW89_THAILAND][1][104] = 127, + [0][1][2][1][RTW89_THAILAND][0][104] = 127, [0][1][2][1][RTW89_FCC][1][105] = -2, [0][1][2][1][RTW89_FCC][2][105] = 127, [0][1][2][1][RTW89_ETSI][1][105] = 127, @@ -42994,6 +44355,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][105] = 127, [0][1][2][1][RTW89_MKK][0][105] = 127, [0][1][2][1][RTW89_IC][1][105] = -2, + [0][1][2][1][RTW89_IC][2][105] = 127, 
[0][1][2][1][RTW89_KCC][1][105] = 20, [0][1][2][1][RTW89_KCC][0][105] = 127, [0][1][2][1][RTW89_ACMA][1][105] = 127, @@ -43003,6 +44365,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][105] = 127, [0][1][2][1][RTW89_UK][1][105] = 127, [0][1][2][1][RTW89_UK][0][105] = 127, + [0][1][2][1][RTW89_THAILAND][1][105] = 127, + [0][1][2][1][RTW89_THAILAND][0][105] = 127, [0][1][2][1][RTW89_FCC][1][107] = 1, [0][1][2][1][RTW89_FCC][2][107] = 127, [0][1][2][1][RTW89_ETSI][1][107] = 127, @@ -43010,6 +44374,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][107] = 127, [0][1][2][1][RTW89_MKK][0][107] = 127, [0][1][2][1][RTW89_IC][1][107] = 1, + [0][1][2][1][RTW89_IC][2][107] = 127, [0][1][2][1][RTW89_KCC][1][107] = 20, [0][1][2][1][RTW89_KCC][0][107] = 127, [0][1][2][1][RTW89_ACMA][1][107] = 127, @@ -43019,6 +44384,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][107] = 127, [0][1][2][1][RTW89_UK][1][107] = 127, [0][1][2][1][RTW89_UK][0][107] = 127, + [0][1][2][1][RTW89_THAILAND][1][107] = 127, + [0][1][2][1][RTW89_THAILAND][0][107] = 127, [0][1][2][1][RTW89_FCC][1][109] = 1, [0][1][2][1][RTW89_FCC][2][109] = 127, [0][1][2][1][RTW89_ETSI][1][109] = 127, @@ -43026,6 +44393,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][109] = 127, [0][1][2][1][RTW89_MKK][0][109] = 127, [0][1][2][1][RTW89_IC][1][109] = 1, + [0][1][2][1][RTW89_IC][2][109] = 127, [0][1][2][1][RTW89_KCC][1][109] = 20, [0][1][2][1][RTW89_KCC][0][109] = 127, [0][1][2][1][RTW89_ACMA][1][109] = 127, @@ -43035,6 +44403,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][109] = 127, [0][1][2][1][RTW89_UK][1][109] = 127, [0][1][2][1][RTW89_UK][0][109] = 127, + [0][1][2][1][RTW89_THAILAND][1][109] = 127, + [0][1][2][1][RTW89_THAILAND][0][109] = 127, [0][1][2][1][RTW89_FCC][1][111] = 127, [0][1][2][1][RTW89_FCC][2][111] = 127, [0][1][2][1][RTW89_ETSI][1][111] = 127, @@ -43042,6 +44412,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][111] = 127, [0][1][2][1][RTW89_MKK][0][111] = 127, [0][1][2][1][RTW89_IC][1][111] = 127, + [0][1][2][1][RTW89_IC][2][111] = 127, [0][1][2][1][RTW89_KCC][1][111] = 127, [0][1][2][1][RTW89_KCC][0][111] = 127, [0][1][2][1][RTW89_ACMA][1][111] = 127, @@ -43051,6 +44422,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][111] = 127, [0][1][2][1][RTW89_UK][1][111] = 127, [0][1][2][1][RTW89_UK][0][111] = 127, + [0][1][2][1][RTW89_THAILAND][1][111] = 127, + [0][1][2][1][RTW89_THAILAND][0][111] = 127, [0][1][2][1][RTW89_FCC][1][113] = 127, [0][1][2][1][RTW89_FCC][2][113] = 127, [0][1][2][1][RTW89_ETSI][1][113] = 127, @@ -43058,6 +44431,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][113] = 127, [0][1][2][1][RTW89_MKK][0][113] = 127, [0][1][2][1][RTW89_IC][1][113] = 127, + [0][1][2][1][RTW89_IC][2][113] = 127, [0][1][2][1][RTW89_KCC][1][113] = 127, [0][1][2][1][RTW89_KCC][0][113] = 127, [0][1][2][1][RTW89_ACMA][1][113] = 127, @@ -43067,6 +44441,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][113] = 127, [0][1][2][1][RTW89_UK][1][113] = 127, [0][1][2][1][RTW89_UK][0][113] = 127, + [0][1][2][1][RTW89_THAILAND][1][113] = 127, + [0][1][2][1][RTW89_THAILAND][0][113] = 127, 
[0][1][2][1][RTW89_FCC][1][115] = 127, [0][1][2][1][RTW89_FCC][2][115] = 127, [0][1][2][1][RTW89_ETSI][1][115] = 127, @@ -43074,6 +44450,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][115] = 127, [0][1][2][1][RTW89_MKK][0][115] = 127, [0][1][2][1][RTW89_IC][1][115] = 127, + [0][1][2][1][RTW89_IC][2][115] = 127, [0][1][2][1][RTW89_KCC][1][115] = 127, [0][1][2][1][RTW89_KCC][0][115] = 127, [0][1][2][1][RTW89_ACMA][1][115] = 127, @@ -43083,6 +44460,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][115] = 127, [0][1][2][1][RTW89_UK][1][115] = 127, [0][1][2][1][RTW89_UK][0][115] = 127, + [0][1][2][1][RTW89_THAILAND][1][115] = 127, + [0][1][2][1][RTW89_THAILAND][0][115] = 127, [0][1][2][1][RTW89_FCC][1][117] = 127, [0][1][2][1][RTW89_FCC][2][117] = 127, [0][1][2][1][RTW89_ETSI][1][117] = 127, @@ -43090,6 +44469,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][117] = 127, [0][1][2][1][RTW89_MKK][0][117] = 127, [0][1][2][1][RTW89_IC][1][117] = 127, + [0][1][2][1][RTW89_IC][2][117] = 127, [0][1][2][1][RTW89_KCC][1][117] = 127, [0][1][2][1][RTW89_KCC][0][117] = 127, [0][1][2][1][RTW89_ACMA][1][117] = 127, @@ -43099,6 +44479,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][117] = 127, [0][1][2][1][RTW89_UK][1][117] = 127, [0][1][2][1][RTW89_UK][0][117] = 127, + [0][1][2][1][RTW89_THAILAND][1][117] = 127, + [0][1][2][1][RTW89_THAILAND][0][117] = 127, [0][1][2][1][RTW89_FCC][1][119] = 127, [0][1][2][1][RTW89_FCC][2][119] = 127, [0][1][2][1][RTW89_ETSI][1][119] = 127, @@ -43106,6 +44488,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][119] = 127, [0][1][2][1][RTW89_MKK][0][119] = 127, [0][1][2][1][RTW89_IC][1][119] = 127, + [0][1][2][1][RTW89_IC][2][119] = 127, [0][1][2][1][RTW89_KCC][1][119] = 127, [0][1][2][1][RTW89_KCC][0][119] = 127, [0][1][2][1][RTW89_ACMA][1][119] = 127, @@ -43115,6 +44498,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][119] = 127, [0][1][2][1][RTW89_UK][1][119] = 127, [0][1][2][1][RTW89_UK][0][119] = 127, + [0][1][2][1][RTW89_THAILAND][1][119] = 127, + [0][1][2][1][RTW89_THAILAND][0][119] = 127, [1][0][2][0][RTW89_FCC][1][1] = 34, [1][0][2][0][RTW89_FCC][2][1] = 70, [1][0][2][0][RTW89_ETSI][1][1] = 66, @@ -43122,6 +44507,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][1] = 62, [1][0][2][0][RTW89_MKK][0][1] = 26, [1][0][2][0][RTW89_IC][1][1] = 34, + [1][0][2][0][RTW89_IC][2][1] = 70, [1][0][2][0][RTW89_KCC][1][1] = 40, [1][0][2][0][RTW89_KCC][0][1] = 24, [1][0][2][0][RTW89_ACMA][1][1] = 66, @@ -43131,6 +44517,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][1] = 30, [1][0][2][0][RTW89_UK][1][1] = 66, [1][0][2][0][RTW89_UK][0][1] = 30, + [1][0][2][0][RTW89_THAILAND][1][1] = 68, + [1][0][2][0][RTW89_THAILAND][0][1] = 30, [1][0][2][0][RTW89_FCC][1][5] = 34, [1][0][2][0][RTW89_FCC][2][5] = 70, [1][0][2][0][RTW89_ETSI][1][5] = 66, @@ -43138,6 +44526,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][5] = 62, [1][0][2][0][RTW89_MKK][0][5] = 26, [1][0][2][0][RTW89_IC][1][5] = 34, + [1][0][2][0][RTW89_IC][2][5] = 70, [1][0][2][0][RTW89_KCC][1][5] = 40, [1][0][2][0][RTW89_KCC][0][5] = 24, [1][0][2][0][RTW89_ACMA][1][5] = 66, @@ -43147,6 +44536,8 @@ 
const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][5] = 30, [1][0][2][0][RTW89_UK][1][5] = 66, [1][0][2][0][RTW89_UK][0][5] = 30, + [1][0][2][0][RTW89_THAILAND][1][5] = 68, + [1][0][2][0][RTW89_THAILAND][0][5] = 30, [1][0][2][0][RTW89_FCC][1][9] = 34, [1][0][2][0][RTW89_FCC][2][9] = 70, [1][0][2][0][RTW89_ETSI][1][9] = 66, @@ -43154,6 +44545,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][9] = 62, [1][0][2][0][RTW89_MKK][0][9] = 26, [1][0][2][0][RTW89_IC][1][9] = 34, + [1][0][2][0][RTW89_IC][2][9] = 70, [1][0][2][0][RTW89_KCC][1][9] = 40, [1][0][2][0][RTW89_KCC][0][9] = 24, [1][0][2][0][RTW89_ACMA][1][9] = 66, @@ -43163,6 +44555,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][9] = 30, [1][0][2][0][RTW89_UK][1][9] = 66, [1][0][2][0][RTW89_UK][0][9] = 30, + [1][0][2][0][RTW89_THAILAND][1][9] = 68, + [1][0][2][0][RTW89_THAILAND][0][9] = 30, [1][0][2][0][RTW89_FCC][1][13] = 34, [1][0][2][0][RTW89_FCC][2][13] = 70, [1][0][2][0][RTW89_ETSI][1][13] = 66, @@ -43170,6 +44564,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][13] = 62, [1][0][2][0][RTW89_MKK][0][13] = 26, [1][0][2][0][RTW89_IC][1][13] = 34, + [1][0][2][0][RTW89_IC][2][13] = 70, [1][0][2][0][RTW89_KCC][1][13] = 40, [1][0][2][0][RTW89_KCC][0][13] = 24, [1][0][2][0][RTW89_ACMA][1][13] = 66, @@ -43179,6 +44574,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][13] = 30, [1][0][2][0][RTW89_UK][1][13] = 66, [1][0][2][0][RTW89_UK][0][13] = 30, + [1][0][2][0][RTW89_THAILAND][1][13] = 68, + [1][0][2][0][RTW89_THAILAND][0][13] = 30, [1][0][2][0][RTW89_FCC][1][16] = 34, [1][0][2][0][RTW89_FCC][2][16] = 70, [1][0][2][0][RTW89_ETSI][1][16] = 66, @@ -43186,6 +44583,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][16] = 62, [1][0][2][0][RTW89_MKK][0][16] = 26, [1][0][2][0][RTW89_IC][1][16] = 34, + [1][0][2][0][RTW89_IC][2][16] = 70, [1][0][2][0][RTW89_KCC][1][16] = 40, [1][0][2][0][RTW89_KCC][0][16] = 24, [1][0][2][0][RTW89_ACMA][1][16] = 66, @@ -43195,6 +44593,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][16] = 30, [1][0][2][0][RTW89_UK][1][16] = 66, [1][0][2][0][RTW89_UK][0][16] = 30, + [1][0][2][0][RTW89_THAILAND][1][16] = 68, + [1][0][2][0][RTW89_THAILAND][0][16] = 30, [1][0][2][0][RTW89_FCC][1][20] = 34, [1][0][2][0][RTW89_FCC][2][20] = 70, [1][0][2][0][RTW89_ETSI][1][20] = 66, @@ -43202,6 +44602,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][20] = 62, [1][0][2][0][RTW89_MKK][0][20] = 26, [1][0][2][0][RTW89_IC][1][20] = 34, + [1][0][2][0][RTW89_IC][2][20] = 70, [1][0][2][0][RTW89_KCC][1][20] = 40, [1][0][2][0][RTW89_KCC][0][20] = 24, [1][0][2][0][RTW89_ACMA][1][20] = 66, @@ -43211,6 +44612,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][20] = 30, [1][0][2][0][RTW89_UK][1][20] = 66, [1][0][2][0][RTW89_UK][0][20] = 30, + [1][0][2][0][RTW89_THAILAND][1][20] = 68, + [1][0][2][0][RTW89_THAILAND][0][20] = 30, [1][0][2][0][RTW89_FCC][1][24] = 36, [1][0][2][0][RTW89_FCC][2][24] = 70, [1][0][2][0][RTW89_ETSI][1][24] = 66, @@ -43218,6 +44621,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][24] = 64, [1][0][2][0][RTW89_MKK][0][24] = 28, [1][0][2][0][RTW89_IC][1][24] 
= 36, + [1][0][2][0][RTW89_IC][2][24] = 70, [1][0][2][0][RTW89_KCC][1][24] = 40, [1][0][2][0][RTW89_KCC][0][24] = 26, [1][0][2][0][RTW89_ACMA][1][24] = 66, @@ -43227,6 +44631,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][24] = 30, [1][0][2][0][RTW89_UK][1][24] = 66, [1][0][2][0][RTW89_UK][0][24] = 30, + [1][0][2][0][RTW89_THAILAND][1][24] = 68, + [1][0][2][0][RTW89_THAILAND][0][24] = 30, [1][0][2][0][RTW89_FCC][1][28] = 34, [1][0][2][0][RTW89_FCC][2][28] = 70, [1][0][2][0][RTW89_ETSI][1][28] = 66, @@ -43234,6 +44640,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][28] = 64, [1][0][2][0][RTW89_MKK][0][28] = 26, [1][0][2][0][RTW89_IC][1][28] = 34, + [1][0][2][0][RTW89_IC][2][28] = 70, [1][0][2][0][RTW89_KCC][1][28] = 40, [1][0][2][0][RTW89_KCC][0][28] = 26, [1][0][2][0][RTW89_ACMA][1][28] = 66, @@ -43243,6 +44650,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][28] = 30, [1][0][2][0][RTW89_UK][1][28] = 66, [1][0][2][0][RTW89_UK][0][28] = 30, + [1][0][2][0][RTW89_THAILAND][1][28] = 68, + [1][0][2][0][RTW89_THAILAND][0][28] = 30, [1][0][2][0][RTW89_FCC][1][31] = 34, [1][0][2][0][RTW89_FCC][2][31] = 70, [1][0][2][0][RTW89_ETSI][1][31] = 66, @@ -43250,6 +44659,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][31] = 64, [1][0][2][0][RTW89_MKK][0][31] = 26, [1][0][2][0][RTW89_IC][1][31] = 34, + [1][0][2][0][RTW89_IC][2][31] = 70, [1][0][2][0][RTW89_KCC][1][31] = 40, [1][0][2][0][RTW89_KCC][0][31] = 26, [1][0][2][0][RTW89_ACMA][1][31] = 66, @@ -43259,6 +44669,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][31] = 30, [1][0][2][0][RTW89_UK][1][31] = 66, [1][0][2][0][RTW89_UK][0][31] = 30, + [1][0][2][0][RTW89_THAILAND][1][31] = 68, + [1][0][2][0][RTW89_THAILAND][0][31] = 30, [1][0][2][0][RTW89_FCC][1][35] = 34, [1][0][2][0][RTW89_FCC][2][35] = 70, [1][0][2][0][RTW89_ETSI][1][35] = 66, @@ -43266,6 +44678,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][35] = 64, [1][0][2][0][RTW89_MKK][0][35] = 26, [1][0][2][0][RTW89_IC][1][35] = 34, + [1][0][2][0][RTW89_IC][2][35] = 70, [1][0][2][0][RTW89_KCC][1][35] = 40, [1][0][2][0][RTW89_KCC][0][35] = 26, [1][0][2][0][RTW89_ACMA][1][35] = 66, @@ -43275,6 +44688,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][35] = 30, [1][0][2][0][RTW89_UK][1][35] = 66, [1][0][2][0][RTW89_UK][0][35] = 30, + [1][0][2][0][RTW89_THAILAND][1][35] = 68, + [1][0][2][0][RTW89_THAILAND][0][35] = 30, [1][0][2][0][RTW89_FCC][1][39] = 34, [1][0][2][0][RTW89_FCC][2][39] = 70, [1][0][2][0][RTW89_ETSI][1][39] = 66, @@ -43282,6 +44697,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][39] = 64, [1][0][2][0][RTW89_MKK][0][39] = 26, [1][0][2][0][RTW89_IC][1][39] = 34, + [1][0][2][0][RTW89_IC][2][39] = 70, [1][0][2][0][RTW89_KCC][1][39] = 40, [1][0][2][0][RTW89_KCC][0][39] = 26, [1][0][2][0][RTW89_ACMA][1][39] = 66, @@ -43291,6 +44707,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][39] = 30, [1][0][2][0][RTW89_UK][1][39] = 66, [1][0][2][0][RTW89_UK][0][39] = 30, + [1][0][2][0][RTW89_THAILAND][1][39] = 68, + [1][0][2][0][RTW89_THAILAND][0][39] = 30, [1][0][2][0][RTW89_FCC][1][43] = 34, [1][0][2][0][RTW89_FCC][2][43] = 70, [1][0][2][0][RTW89_ETSI][1][43] 
= 66, @@ -43298,6 +44716,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][43] = 64, [1][0][2][0][RTW89_MKK][0][43] = 26, [1][0][2][0][RTW89_IC][1][43] = 34, + [1][0][2][0][RTW89_IC][2][43] = 70, [1][0][2][0][RTW89_KCC][1][43] = 40, [1][0][2][0][RTW89_KCC][0][43] = 26, [1][0][2][0][RTW89_ACMA][1][43] = 66, @@ -43307,6 +44726,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][43] = 30, [1][0][2][0][RTW89_UK][1][43] = 66, [1][0][2][0][RTW89_UK][0][43] = 30, + [1][0][2][0][RTW89_THAILAND][1][43] = 68, + [1][0][2][0][RTW89_THAILAND][0][43] = 30, [1][0][2][0][RTW89_FCC][1][46] = 34, [1][0][2][0][RTW89_FCC][2][46] = 127, [1][0][2][0][RTW89_ETSI][1][46] = 127, @@ -43314,6 +44735,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][46] = 127, [1][0][2][0][RTW89_MKK][0][46] = 127, [1][0][2][0][RTW89_IC][1][46] = 34, + [1][0][2][0][RTW89_IC][2][46] = 68, [1][0][2][0][RTW89_KCC][1][46] = 40, [1][0][2][0][RTW89_KCC][0][46] = 127, [1][0][2][0][RTW89_ACMA][1][46] = 127, @@ -43323,6 +44745,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][46] = 127, [1][0][2][0][RTW89_UK][1][46] = 127, [1][0][2][0][RTW89_UK][0][46] = 127, + [1][0][2][0][RTW89_THAILAND][1][46] = 127, + [1][0][2][0][RTW89_THAILAND][0][46] = 127, [1][0][2][0][RTW89_FCC][1][50] = 34, [1][0][2][0][RTW89_FCC][2][50] = 127, [1][0][2][0][RTW89_ETSI][1][50] = 127, @@ -43330,6 +44754,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][50] = 127, [1][0][2][0][RTW89_MKK][0][50] = 127, [1][0][2][0][RTW89_IC][1][50] = 34, + [1][0][2][0][RTW89_IC][2][50] = 68, [1][0][2][0][RTW89_KCC][1][50] = 40, [1][0][2][0][RTW89_KCC][0][50] = 127, [1][0][2][0][RTW89_ACMA][1][50] = 127, @@ -43339,6 +44764,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][50] = 127, [1][0][2][0][RTW89_UK][1][50] = 127, [1][0][2][0][RTW89_UK][0][50] = 127, + [1][0][2][0][RTW89_THAILAND][1][50] = 127, + [1][0][2][0][RTW89_THAILAND][0][50] = 127, [1][0][2][0][RTW89_FCC][1][54] = 36, [1][0][2][0][RTW89_FCC][2][54] = 127, [1][0][2][0][RTW89_ETSI][1][54] = 127, @@ -43346,6 +44773,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][54] = 127, [1][0][2][0][RTW89_MKK][0][54] = 127, [1][0][2][0][RTW89_IC][1][54] = 36, + [1][0][2][0][RTW89_IC][2][54] = 127, [1][0][2][0][RTW89_KCC][1][54] = 40, [1][0][2][0][RTW89_KCC][0][54] = 127, [1][0][2][0][RTW89_ACMA][1][54] = 127, @@ -43355,6 +44783,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][54] = 127, [1][0][2][0][RTW89_UK][1][54] = 127, [1][0][2][0][RTW89_UK][0][54] = 127, + [1][0][2][0][RTW89_THAILAND][1][54] = 127, + [1][0][2][0][RTW89_THAILAND][0][54] = 127, [1][0][2][0][RTW89_FCC][1][58] = 36, [1][0][2][0][RTW89_FCC][2][58] = 66, [1][0][2][0][RTW89_ETSI][1][58] = 127, @@ -43362,6 +44792,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][58] = 127, [1][0][2][0][RTW89_MKK][0][58] = 127, [1][0][2][0][RTW89_IC][1][58] = 36, + [1][0][2][0][RTW89_IC][2][58] = 66, [1][0][2][0][RTW89_KCC][1][58] = 40, [1][0][2][0][RTW89_KCC][0][58] = 127, [1][0][2][0][RTW89_ACMA][1][58] = 127, @@ -43371,6 +44802,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][58] = 127, 
[1][0][2][0][RTW89_UK][1][58] = 127, [1][0][2][0][RTW89_UK][0][58] = 127, + [1][0][2][0][RTW89_THAILAND][1][58] = 127, + [1][0][2][0][RTW89_THAILAND][0][58] = 127, [1][0][2][0][RTW89_FCC][1][61] = 34, [1][0][2][0][RTW89_FCC][2][61] = 66, [1][0][2][0][RTW89_ETSI][1][61] = 127, @@ -43378,6 +44811,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][61] = 127, [1][0][2][0][RTW89_MKK][0][61] = 127, [1][0][2][0][RTW89_IC][1][61] = 34, + [1][0][2][0][RTW89_IC][2][61] = 66, [1][0][2][0][RTW89_KCC][1][61] = 40, [1][0][2][0][RTW89_KCC][0][61] = 127, [1][0][2][0][RTW89_ACMA][1][61] = 127, @@ -43387,6 +44821,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][61] = 127, [1][0][2][0][RTW89_UK][1][61] = 127, [1][0][2][0][RTW89_UK][0][61] = 127, + [1][0][2][0][RTW89_THAILAND][1][61] = 127, + [1][0][2][0][RTW89_THAILAND][0][61] = 127, [1][0][2][0][RTW89_FCC][1][65] = 34, [1][0][2][0][RTW89_FCC][2][65] = 66, [1][0][2][0][RTW89_ETSI][1][65] = 127, @@ -43394,6 +44830,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][65] = 127, [1][0][2][0][RTW89_MKK][0][65] = 127, [1][0][2][0][RTW89_IC][1][65] = 34, + [1][0][2][0][RTW89_IC][2][65] = 66, [1][0][2][0][RTW89_KCC][1][65] = 40, [1][0][2][0][RTW89_KCC][0][65] = 127, [1][0][2][0][RTW89_ACMA][1][65] = 127, @@ -43403,6 +44840,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][65] = 127, [1][0][2][0][RTW89_UK][1][65] = 127, [1][0][2][0][RTW89_UK][0][65] = 127, + [1][0][2][0][RTW89_THAILAND][1][65] = 127, + [1][0][2][0][RTW89_THAILAND][0][65] = 127, [1][0][2][0][RTW89_FCC][1][69] = 34, [1][0][2][0][RTW89_FCC][2][69] = 66, [1][0][2][0][RTW89_ETSI][1][69] = 127, @@ -43410,6 +44849,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][69] = 127, [1][0][2][0][RTW89_MKK][0][69] = 127, [1][0][2][0][RTW89_IC][1][69] = 34, + [1][0][2][0][RTW89_IC][2][69] = 66, [1][0][2][0][RTW89_KCC][1][69] = 40, [1][0][2][0][RTW89_KCC][0][69] = 127, [1][0][2][0][RTW89_ACMA][1][69] = 127, @@ -43419,6 +44859,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][69] = 127, [1][0][2][0][RTW89_UK][1][69] = 127, [1][0][2][0][RTW89_UK][0][69] = 127, + [1][0][2][0][RTW89_THAILAND][1][69] = 127, + [1][0][2][0][RTW89_THAILAND][0][69] = 127, [1][0][2][0][RTW89_FCC][1][73] = 34, [1][0][2][0][RTW89_FCC][2][73] = 66, [1][0][2][0][RTW89_ETSI][1][73] = 127, @@ -43426,6 +44868,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][73] = 127, [1][0][2][0][RTW89_MKK][0][73] = 127, [1][0][2][0][RTW89_IC][1][73] = 34, + [1][0][2][0][RTW89_IC][2][73] = 66, [1][0][2][0][RTW89_KCC][1][73] = 40, [1][0][2][0][RTW89_KCC][0][73] = 127, [1][0][2][0][RTW89_ACMA][1][73] = 127, @@ -43435,6 +44878,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][73] = 127, [1][0][2][0][RTW89_UK][1][73] = 127, [1][0][2][0][RTW89_UK][0][73] = 127, + [1][0][2][0][RTW89_THAILAND][1][73] = 127, + [1][0][2][0][RTW89_THAILAND][0][73] = 127, [1][0][2][0][RTW89_FCC][1][76] = 34, [1][0][2][0][RTW89_FCC][2][76] = 66, [1][0][2][0][RTW89_ETSI][1][76] = 127, @@ -43442,6 +44887,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][76] = 127, [1][0][2][0][RTW89_MKK][0][76] = 127, [1][0][2][0][RTW89_IC][1][76] = 34, + [1][0][2][0][RTW89_IC][2][76] 
= 66, [1][0][2][0][RTW89_KCC][1][76] = 40, [1][0][2][0][RTW89_KCC][0][76] = 127, [1][0][2][0][RTW89_ACMA][1][76] = 127, @@ -43451,6 +44897,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][76] = 127, [1][0][2][0][RTW89_UK][1][76] = 127, [1][0][2][0][RTW89_UK][0][76] = 127, + [1][0][2][0][RTW89_THAILAND][1][76] = 127, + [1][0][2][0][RTW89_THAILAND][0][76] = 127, [1][0][2][0][RTW89_FCC][1][80] = 34, [1][0][2][0][RTW89_FCC][2][80] = 66, [1][0][2][0][RTW89_ETSI][1][80] = 127, @@ -43458,6 +44906,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][80] = 127, [1][0][2][0][RTW89_MKK][0][80] = 127, [1][0][2][0][RTW89_IC][1][80] = 34, + [1][0][2][0][RTW89_IC][2][80] = 66, [1][0][2][0][RTW89_KCC][1][80] = 42, [1][0][2][0][RTW89_KCC][0][80] = 127, [1][0][2][0][RTW89_ACMA][1][80] = 127, @@ -43467,6 +44916,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][80] = 127, [1][0][2][0][RTW89_UK][1][80] = 127, [1][0][2][0][RTW89_UK][0][80] = 127, + [1][0][2][0][RTW89_THAILAND][1][80] = 127, + [1][0][2][0][RTW89_THAILAND][0][80] = 127, [1][0][2][0][RTW89_FCC][1][84] = 34, [1][0][2][0][RTW89_FCC][2][84] = 66, [1][0][2][0][RTW89_ETSI][1][84] = 127, @@ -43474,6 +44925,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][84] = 127, [1][0][2][0][RTW89_MKK][0][84] = 127, [1][0][2][0][RTW89_IC][1][84] = 34, + [1][0][2][0][RTW89_IC][2][84] = 66, [1][0][2][0][RTW89_KCC][1][84] = 42, [1][0][2][0][RTW89_KCC][0][84] = 127, [1][0][2][0][RTW89_ACMA][1][84] = 127, @@ -43483,6 +44935,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][84] = 127, [1][0][2][0][RTW89_UK][1][84] = 127, [1][0][2][0][RTW89_UK][0][84] = 127, + [1][0][2][0][RTW89_THAILAND][1][84] = 127, + [1][0][2][0][RTW89_THAILAND][0][84] = 127, [1][0][2][0][RTW89_FCC][1][88] = 34, [1][0][2][0][RTW89_FCC][2][88] = 127, [1][0][2][0][RTW89_ETSI][1][88] = 127, @@ -43490,6 +44944,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][88] = 127, [1][0][2][0][RTW89_MKK][0][88] = 127, [1][0][2][0][RTW89_IC][1][88] = 34, + [1][0][2][0][RTW89_IC][2][88] = 127, [1][0][2][0][RTW89_KCC][1][88] = 42, [1][0][2][0][RTW89_KCC][0][88] = 127, [1][0][2][0][RTW89_ACMA][1][88] = 127, @@ -43499,6 +44954,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][88] = 127, [1][0][2][0][RTW89_UK][1][88] = 127, [1][0][2][0][RTW89_UK][0][88] = 127, + [1][0][2][0][RTW89_THAILAND][1][88] = 127, + [1][0][2][0][RTW89_THAILAND][0][88] = 127, [1][0][2][0][RTW89_FCC][1][91] = 36, [1][0][2][0][RTW89_FCC][2][91] = 127, [1][0][2][0][RTW89_ETSI][1][91] = 127, @@ -43506,6 +44963,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][91] = 127, [1][0][2][0][RTW89_MKK][0][91] = 127, [1][0][2][0][RTW89_IC][1][91] = 36, + [1][0][2][0][RTW89_IC][2][91] = 127, [1][0][2][0][RTW89_KCC][1][91] = 42, [1][0][2][0][RTW89_KCC][0][91] = 127, [1][0][2][0][RTW89_ACMA][1][91] = 127, @@ -43515,6 +44973,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][91] = 127, [1][0][2][0][RTW89_UK][1][91] = 127, [1][0][2][0][RTW89_UK][0][91] = 127, + [1][0][2][0][RTW89_THAILAND][1][91] = 127, + [1][0][2][0][RTW89_THAILAND][0][91] = 127, [1][0][2][0][RTW89_FCC][1][95] = 34, [1][0][2][0][RTW89_FCC][2][95] = 127, 
[1][0][2][0][RTW89_ETSI][1][95] = 127, @@ -43522,6 +44982,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][95] = 127, [1][0][2][0][RTW89_MKK][0][95] = 127, [1][0][2][0][RTW89_IC][1][95] = 34, + [1][0][2][0][RTW89_IC][2][95] = 127, [1][0][2][0][RTW89_KCC][1][95] = 42, [1][0][2][0][RTW89_KCC][0][95] = 127, [1][0][2][0][RTW89_ACMA][1][95] = 127, @@ -43531,6 +44992,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][95] = 127, [1][0][2][0][RTW89_UK][1][95] = 127, [1][0][2][0][RTW89_UK][0][95] = 127, + [1][0][2][0][RTW89_THAILAND][1][95] = 127, + [1][0][2][0][RTW89_THAILAND][0][95] = 127, [1][0][2][0][RTW89_FCC][1][99] = 34, [1][0][2][0][RTW89_FCC][2][99] = 127, [1][0][2][0][RTW89_ETSI][1][99] = 127, @@ -43538,6 +45001,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][99] = 127, [1][0][2][0][RTW89_MKK][0][99] = 127, [1][0][2][0][RTW89_IC][1][99] = 34, + [1][0][2][0][RTW89_IC][2][99] = 127, [1][0][2][0][RTW89_KCC][1][99] = 42, [1][0][2][0][RTW89_KCC][0][99] = 127, [1][0][2][0][RTW89_ACMA][1][99] = 127, @@ -43547,6 +45011,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][99] = 127, [1][0][2][0][RTW89_UK][1][99] = 127, [1][0][2][0][RTW89_UK][0][99] = 127, + [1][0][2][0][RTW89_THAILAND][1][99] = 127, + [1][0][2][0][RTW89_THAILAND][0][99] = 127, [1][0][2][0][RTW89_FCC][1][103] = 34, [1][0][2][0][RTW89_FCC][2][103] = 127, [1][0][2][0][RTW89_ETSI][1][103] = 127, @@ -43554,6 +45020,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][103] = 127, [1][0][2][0][RTW89_MKK][0][103] = 127, [1][0][2][0][RTW89_IC][1][103] = 34, + [1][0][2][0][RTW89_IC][2][103] = 127, [1][0][2][0][RTW89_KCC][1][103] = 42, [1][0][2][0][RTW89_KCC][0][103] = 127, [1][0][2][0][RTW89_ACMA][1][103] = 127, @@ -43563,6 +45030,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][103] = 127, [1][0][2][0][RTW89_UK][1][103] = 127, [1][0][2][0][RTW89_UK][0][103] = 127, + [1][0][2][0][RTW89_THAILAND][1][103] = 127, + [1][0][2][0][RTW89_THAILAND][0][103] = 127, [1][0][2][0][RTW89_FCC][1][106] = 36, [1][0][2][0][RTW89_FCC][2][106] = 127, [1][0][2][0][RTW89_ETSI][1][106] = 127, @@ -43570,6 +45039,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][106] = 127, [1][0][2][0][RTW89_MKK][0][106] = 127, [1][0][2][0][RTW89_IC][1][106] = 36, + [1][0][2][0][RTW89_IC][2][106] = 127, [1][0][2][0][RTW89_KCC][1][106] = 42, [1][0][2][0][RTW89_KCC][0][106] = 127, [1][0][2][0][RTW89_ACMA][1][106] = 127, @@ -43579,6 +45049,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][106] = 127, [1][0][2][0][RTW89_UK][1][106] = 127, [1][0][2][0][RTW89_UK][0][106] = 127, + [1][0][2][0][RTW89_THAILAND][1][106] = 127, + [1][0][2][0][RTW89_THAILAND][0][106] = 127, [1][0][2][0][RTW89_FCC][1][110] = 127, [1][0][2][0][RTW89_FCC][2][110] = 127, [1][0][2][0][RTW89_ETSI][1][110] = 127, @@ -43586,6 +45058,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][110] = 127, [1][0][2][0][RTW89_MKK][0][110] = 127, [1][0][2][0][RTW89_IC][1][110] = 127, + [1][0][2][0][RTW89_IC][2][110] = 127, [1][0][2][0][RTW89_KCC][1][110] = 127, [1][0][2][0][RTW89_KCC][0][110] = 127, [1][0][2][0][RTW89_ACMA][1][110] = 127, @@ -43595,6 +45068,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][110] = 127, [1][0][2][0][RTW89_UK][1][110] = 127, [1][0][2][0][RTW89_UK][0][110] = 127, + [1][0][2][0][RTW89_THAILAND][1][110] = 127, + [1][0][2][0][RTW89_THAILAND][0][110] = 127, [1][0][2][0][RTW89_FCC][1][114] = 127, [1][0][2][0][RTW89_FCC][2][114] = 127, [1][0][2][0][RTW89_ETSI][1][114] = 127, @@ -43602,6 +45077,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][114] = 127, [1][0][2][0][RTW89_MKK][0][114] = 127, [1][0][2][0][RTW89_IC][1][114] = 127, + [1][0][2][0][RTW89_IC][2][114] = 127, [1][0][2][0][RTW89_KCC][1][114] = 127, [1][0][2][0][RTW89_KCC][0][114] = 127, [1][0][2][0][RTW89_ACMA][1][114] = 127, @@ -43611,6 +45087,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][114] = 127, [1][0][2][0][RTW89_UK][1][114] = 127, [1][0][2][0][RTW89_UK][0][114] = 127, + [1][0][2][0][RTW89_THAILAND][1][114] = 127, + [1][0][2][0][RTW89_THAILAND][0][114] = 127, [1][0][2][0][RTW89_FCC][1][118] = 127, [1][0][2][0][RTW89_FCC][2][118] = 127, [1][0][2][0][RTW89_ETSI][1][118] = 127, @@ -43618,6 +45096,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][118] = 127, [1][0][2][0][RTW89_MKK][0][118] = 127, [1][0][2][0][RTW89_IC][1][118] = 127, + [1][0][2][0][RTW89_IC][2][118] = 127, [1][0][2][0][RTW89_KCC][1][118] = 127, [1][0][2][0][RTW89_KCC][0][118] = 127, [1][0][2][0][RTW89_ACMA][1][118] = 127, @@ -43627,6 +45106,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][118] = 127, [1][0][2][0][RTW89_UK][1][118] = 127, [1][0][2][0][RTW89_UK][0][118] = 127, + [1][0][2][0][RTW89_THAILAND][1][118] = 127, + [1][0][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][0][RTW89_FCC][1][1] = 10, [1][1][2][0][RTW89_FCC][2][1] = 58, [1][1][2][0][RTW89_ETSI][1][1] = 54, @@ -43634,6 +45115,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][1] = 52, [1][1][2][0][RTW89_MKK][0][1] = 12, [1][1][2][0][RTW89_IC][1][1] = 10, + [1][1][2][0][RTW89_IC][2][1] = 58, [1][1][2][0][RTW89_KCC][1][1] = 28, [1][1][2][0][RTW89_KCC][0][1] = 12, [1][1][2][0][RTW89_ACMA][1][1] = 54, @@ -43643,6 +45125,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][1] = 18, [1][1][2][0][RTW89_UK][1][1] = 54, [1][1][2][0][RTW89_UK][0][1] = 18, + [1][1][2][0][RTW89_THAILAND][1][1] = 46, + [1][1][2][0][RTW89_THAILAND][0][1] = 10, [1][1][2][0][RTW89_FCC][1][5] = 10, [1][1][2][0][RTW89_FCC][2][5] = 58, [1][1][2][0][RTW89_ETSI][1][5] = 54, @@ -43650,6 +45134,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][5] = 52, [1][1][2][0][RTW89_MKK][0][5] = 12, [1][1][2][0][RTW89_IC][1][5] = 10, + [1][1][2][0][RTW89_IC][2][5] = 58, [1][1][2][0][RTW89_KCC][1][5] = 28, [1][1][2][0][RTW89_KCC][0][5] = 12, [1][1][2][0][RTW89_ACMA][1][5] = 54, @@ -43659,6 +45144,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][5] = 16, [1][1][2][0][RTW89_UK][1][5] = 54, [1][1][2][0][RTW89_UK][0][5] = 16, + [1][1][2][0][RTW89_THAILAND][1][5] = 46, + [1][1][2][0][RTW89_THAILAND][0][5] = 10, [1][1][2][0][RTW89_FCC][1][9] = 10, [1][1][2][0][RTW89_FCC][2][9] = 58, [1][1][2][0][RTW89_ETSI][1][9] = 54, @@ -43666,6 +45153,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][9] = 52, 
[1][1][2][0][RTW89_MKK][0][9] = 12, [1][1][2][0][RTW89_IC][1][9] = 10, + [1][1][2][0][RTW89_IC][2][9] = 58, [1][1][2][0][RTW89_KCC][1][9] = 28, [1][1][2][0][RTW89_KCC][0][9] = 12, [1][1][2][0][RTW89_ACMA][1][9] = 54, @@ -43675,6 +45163,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][9] = 16, [1][1][2][0][RTW89_UK][1][9] = 54, [1][1][2][0][RTW89_UK][0][9] = 16, + [1][1][2][0][RTW89_THAILAND][1][9] = 46, + [1][1][2][0][RTW89_THAILAND][0][9] = 10, [1][1][2][0][RTW89_FCC][1][13] = 10, [1][1][2][0][RTW89_FCC][2][13] = 58, [1][1][2][0][RTW89_ETSI][1][13] = 54, @@ -43682,6 +45172,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][13] = 52, [1][1][2][0][RTW89_MKK][0][13] = 12, [1][1][2][0][RTW89_IC][1][13] = 10, + [1][1][2][0][RTW89_IC][2][13] = 58, [1][1][2][0][RTW89_KCC][1][13] = 28, [1][1][2][0][RTW89_KCC][0][13] = 12, [1][1][2][0][RTW89_ACMA][1][13] = 54, @@ -43691,6 +45182,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][13] = 16, [1][1][2][0][RTW89_UK][1][13] = 54, [1][1][2][0][RTW89_UK][0][13] = 16, + [1][1][2][0][RTW89_THAILAND][1][13] = 46, + [1][1][2][0][RTW89_THAILAND][0][13] = 10, [1][1][2][0][RTW89_FCC][1][16] = 10, [1][1][2][0][RTW89_FCC][2][16] = 58, [1][1][2][0][RTW89_ETSI][1][16] = 54, @@ -43698,6 +45191,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][16] = 52, [1][1][2][0][RTW89_MKK][0][16] = 12, [1][1][2][0][RTW89_IC][1][16] = 10, + [1][1][2][0][RTW89_IC][2][16] = 58, [1][1][2][0][RTW89_KCC][1][16] = 28, [1][1][2][0][RTW89_KCC][0][16] = 12, [1][1][2][0][RTW89_ACMA][1][16] = 54, @@ -43707,6 +45201,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][16] = 16, [1][1][2][0][RTW89_UK][1][16] = 54, [1][1][2][0][RTW89_UK][0][16] = 16, + [1][1][2][0][RTW89_THAILAND][1][16] = 46, + [1][1][2][0][RTW89_THAILAND][0][16] = 10, [1][1][2][0][RTW89_FCC][1][20] = 10, [1][1][2][0][RTW89_FCC][2][20] = 58, [1][1][2][0][RTW89_ETSI][1][20] = 54, @@ -43714,6 +45210,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][20] = 52, [1][1][2][0][RTW89_MKK][0][20] = 12, [1][1][2][0][RTW89_IC][1][20] = 10, + [1][1][2][0][RTW89_IC][2][20] = 58, [1][1][2][0][RTW89_KCC][1][20] = 28, [1][1][2][0][RTW89_KCC][0][20] = 12, [1][1][2][0][RTW89_ACMA][1][20] = 54, @@ -43723,6 +45220,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][20] = 16, [1][1][2][0][RTW89_UK][1][20] = 54, [1][1][2][0][RTW89_UK][0][20] = 16, + [1][1][2][0][RTW89_THAILAND][1][20] = 46, + [1][1][2][0][RTW89_THAILAND][0][20] = 10, [1][1][2][0][RTW89_FCC][1][24] = 10, [1][1][2][0][RTW89_FCC][2][24] = 70, [1][1][2][0][RTW89_ETSI][1][24] = 54, @@ -43730,6 +45229,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][24] = 54, [1][1][2][0][RTW89_MKK][0][24] = 14, [1][1][2][0][RTW89_IC][1][24] = 10, + [1][1][2][0][RTW89_IC][2][24] = 70, [1][1][2][0][RTW89_KCC][1][24] = 28, [1][1][2][0][RTW89_KCC][0][24] = 12, [1][1][2][0][RTW89_ACMA][1][24] = 54, @@ -43739,6 +45239,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][24] = 16, [1][1][2][0][RTW89_UK][1][24] = 54, [1][1][2][0][RTW89_UK][0][24] = 16, + [1][1][2][0][RTW89_THAILAND][1][24] = 46, + [1][1][2][0][RTW89_THAILAND][0][24] = 10, [1][1][2][0][RTW89_FCC][1][28] = 10, 
[1][1][2][0][RTW89_FCC][2][28] = 70, [1][1][2][0][RTW89_ETSI][1][28] = 54, @@ -43746,6 +45248,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][28] = 52, [1][1][2][0][RTW89_MKK][0][28] = 14, [1][1][2][0][RTW89_IC][1][28] = 10, + [1][1][2][0][RTW89_IC][2][28] = 70, [1][1][2][0][RTW89_KCC][1][28] = 28, [1][1][2][0][RTW89_KCC][0][28] = 14, [1][1][2][0][RTW89_ACMA][1][28] = 54, @@ -43755,6 +45258,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][28] = 16, [1][1][2][0][RTW89_UK][1][28] = 54, [1][1][2][0][RTW89_UK][0][28] = 16, + [1][1][2][0][RTW89_THAILAND][1][28] = 46, + [1][1][2][0][RTW89_THAILAND][0][28] = 10, [1][1][2][0][RTW89_FCC][1][31] = 10, [1][1][2][0][RTW89_FCC][2][31] = 70, [1][1][2][0][RTW89_ETSI][1][31] = 54, @@ -43762,6 +45267,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][31] = 52, [1][1][2][0][RTW89_MKK][0][31] = 14, [1][1][2][0][RTW89_IC][1][31] = 10, + [1][1][2][0][RTW89_IC][2][31] = 70, [1][1][2][0][RTW89_KCC][1][31] = 28, [1][1][2][0][RTW89_KCC][0][31] = 14, [1][1][2][0][RTW89_ACMA][1][31] = 54, @@ -43771,6 +45277,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][31] = 16, [1][1][2][0][RTW89_UK][1][31] = 54, [1][1][2][0][RTW89_UK][0][31] = 16, + [1][1][2][0][RTW89_THAILAND][1][31] = 46, + [1][1][2][0][RTW89_THAILAND][0][31] = 10, [1][1][2][0][RTW89_FCC][1][35] = 10, [1][1][2][0][RTW89_FCC][2][35] = 70, [1][1][2][0][RTW89_ETSI][1][35] = 54, @@ -43778,6 +45286,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][35] = 52, [1][1][2][0][RTW89_MKK][0][35] = 14, [1][1][2][0][RTW89_IC][1][35] = 10, + [1][1][2][0][RTW89_IC][2][35] = 70, [1][1][2][0][RTW89_KCC][1][35] = 28, [1][1][2][0][RTW89_KCC][0][35] = 14, [1][1][2][0][RTW89_ACMA][1][35] = 54, @@ -43787,6 +45296,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][35] = 16, [1][1][2][0][RTW89_UK][1][35] = 54, [1][1][2][0][RTW89_UK][0][35] = 16, + [1][1][2][0][RTW89_THAILAND][1][35] = 46, + [1][1][2][0][RTW89_THAILAND][0][35] = 10, [1][1][2][0][RTW89_FCC][1][39] = 10, [1][1][2][0][RTW89_FCC][2][39] = 70, [1][1][2][0][RTW89_ETSI][1][39] = 54, @@ -43794,6 +45305,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][39] = 52, [1][1][2][0][RTW89_MKK][0][39] = 14, [1][1][2][0][RTW89_IC][1][39] = 10, + [1][1][2][0][RTW89_IC][2][39] = 70, [1][1][2][0][RTW89_KCC][1][39] = 28, [1][1][2][0][RTW89_KCC][0][39] = 14, [1][1][2][0][RTW89_ACMA][1][39] = 54, @@ -43803,6 +45315,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][39] = 16, [1][1][2][0][RTW89_UK][1][39] = 54, [1][1][2][0][RTW89_UK][0][39] = 16, + [1][1][2][0][RTW89_THAILAND][1][39] = 46, + [1][1][2][0][RTW89_THAILAND][0][39] = 10, [1][1][2][0][RTW89_FCC][1][43] = 10, [1][1][2][0][RTW89_FCC][2][43] = 70, [1][1][2][0][RTW89_ETSI][1][43] = 54, @@ -43810,6 +45324,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][43] = 52, [1][1][2][0][RTW89_MKK][0][43] = 14, [1][1][2][0][RTW89_IC][1][43] = 10, + [1][1][2][0][RTW89_IC][2][43] = 70, [1][1][2][0][RTW89_KCC][1][43] = 28, [1][1][2][0][RTW89_KCC][0][43] = 14, [1][1][2][0][RTW89_ACMA][1][43] = 54, @@ -43819,6 +45334,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][43] = 
16, [1][1][2][0][RTW89_UK][1][43] = 54, [1][1][2][0][RTW89_UK][0][43] = 16, + [1][1][2][0][RTW89_THAILAND][1][43] = 46, + [1][1][2][0][RTW89_THAILAND][0][43] = 10, [1][1][2][0][RTW89_FCC][1][46] = 12, [1][1][2][0][RTW89_FCC][2][46] = 127, [1][1][2][0][RTW89_ETSI][1][46] = 127, @@ -43826,6 +45343,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][46] = 127, [1][1][2][0][RTW89_MKK][0][46] = 127, [1][1][2][0][RTW89_IC][1][46] = 12, + [1][1][2][0][RTW89_IC][2][46] = 68, [1][1][2][0][RTW89_KCC][1][46] = 28, [1][1][2][0][RTW89_KCC][0][46] = 127, [1][1][2][0][RTW89_ACMA][1][46] = 127, @@ -43835,6 +45353,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][46] = 127, [1][1][2][0][RTW89_UK][1][46] = 127, [1][1][2][0][RTW89_UK][0][46] = 127, + [1][1][2][0][RTW89_THAILAND][1][46] = 127, + [1][1][2][0][RTW89_THAILAND][0][46] = 127, [1][1][2][0][RTW89_FCC][1][50] = 12, [1][1][2][0][RTW89_FCC][2][50] = 127, [1][1][2][0][RTW89_ETSI][1][50] = 127, @@ -43842,6 +45362,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][50] = 127, [1][1][2][0][RTW89_MKK][0][50] = 127, [1][1][2][0][RTW89_IC][1][50] = 12, + [1][1][2][0][RTW89_IC][2][50] = 68, [1][1][2][0][RTW89_KCC][1][50] = 28, [1][1][2][0][RTW89_KCC][0][50] = 127, [1][1][2][0][RTW89_ACMA][1][50] = 127, @@ -43851,6 +45372,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][50] = 127, [1][1][2][0][RTW89_UK][1][50] = 127, [1][1][2][0][RTW89_UK][0][50] = 127, + [1][1][2][0][RTW89_THAILAND][1][50] = 127, + [1][1][2][0][RTW89_THAILAND][0][50] = 127, [1][1][2][0][RTW89_FCC][1][54] = 10, [1][1][2][0][RTW89_FCC][2][54] = 127, [1][1][2][0][RTW89_ETSI][1][54] = 127, @@ -43858,6 +45381,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][54] = 127, [1][1][2][0][RTW89_MKK][0][54] = 127, [1][1][2][0][RTW89_IC][1][54] = 10, + [1][1][2][0][RTW89_IC][2][54] = 127, [1][1][2][0][RTW89_KCC][1][54] = 28, [1][1][2][0][RTW89_KCC][0][54] = 127, [1][1][2][0][RTW89_ACMA][1][54] = 127, @@ -43867,6 +45391,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][54] = 127, [1][1][2][0][RTW89_UK][1][54] = 127, [1][1][2][0][RTW89_UK][0][54] = 127, + [1][1][2][0][RTW89_THAILAND][1][54] = 127, + [1][1][2][0][RTW89_THAILAND][0][54] = 127, [1][1][2][0][RTW89_FCC][1][58] = 10, [1][1][2][0][RTW89_FCC][2][58] = 66, [1][1][2][0][RTW89_ETSI][1][58] = 127, @@ -43874,6 +45400,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][58] = 127, [1][1][2][0][RTW89_MKK][0][58] = 127, [1][1][2][0][RTW89_IC][1][58] = 10, + [1][1][2][0][RTW89_IC][2][58] = 66, [1][1][2][0][RTW89_KCC][1][58] = 28, [1][1][2][0][RTW89_KCC][0][58] = 127, [1][1][2][0][RTW89_ACMA][1][58] = 127, @@ -43883,6 +45410,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][58] = 127, [1][1][2][0][RTW89_UK][1][58] = 127, [1][1][2][0][RTW89_UK][0][58] = 127, + [1][1][2][0][RTW89_THAILAND][1][58] = 127, + [1][1][2][0][RTW89_THAILAND][0][58] = 127, [1][1][2][0][RTW89_FCC][1][61] = 10, [1][1][2][0][RTW89_FCC][2][61] = 66, [1][1][2][0][RTW89_ETSI][1][61] = 127, @@ -43890,6 +45419,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][61] = 127, [1][1][2][0][RTW89_MKK][0][61] = 127, [1][1][2][0][RTW89_IC][1][61] = 10, + 
[1][1][2][0][RTW89_IC][2][61] = 66, [1][1][2][0][RTW89_KCC][1][61] = 28, [1][1][2][0][RTW89_KCC][0][61] = 127, [1][1][2][0][RTW89_ACMA][1][61] = 127, @@ -43899,6 +45429,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][61] = 127, [1][1][2][0][RTW89_UK][1][61] = 127, [1][1][2][0][RTW89_UK][0][61] = 127, + [1][1][2][0][RTW89_THAILAND][1][61] = 127, + [1][1][2][0][RTW89_THAILAND][0][61] = 127, [1][1][2][0][RTW89_FCC][1][65] = 10, [1][1][2][0][RTW89_FCC][2][65] = 66, [1][1][2][0][RTW89_ETSI][1][65] = 127, @@ -43906,6 +45438,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][65] = 127, [1][1][2][0][RTW89_MKK][0][65] = 127, [1][1][2][0][RTW89_IC][1][65] = 10, + [1][1][2][0][RTW89_IC][2][65] = 66, [1][1][2][0][RTW89_KCC][1][65] = 28, [1][1][2][0][RTW89_KCC][0][65] = 127, [1][1][2][0][RTW89_ACMA][1][65] = 127, @@ -43915,6 +45448,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][65] = 127, [1][1][2][0][RTW89_UK][1][65] = 127, [1][1][2][0][RTW89_UK][0][65] = 127, + [1][1][2][0][RTW89_THAILAND][1][65] = 127, + [1][1][2][0][RTW89_THAILAND][0][65] = 127, [1][1][2][0][RTW89_FCC][1][69] = 10, [1][1][2][0][RTW89_FCC][2][69] = 66, [1][1][2][0][RTW89_ETSI][1][69] = 127, @@ -43922,6 +45457,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][69] = 127, [1][1][2][0][RTW89_MKK][0][69] = 127, [1][1][2][0][RTW89_IC][1][69] = 10, + [1][1][2][0][RTW89_IC][2][69] = 66, [1][1][2][0][RTW89_KCC][1][69] = 28, [1][1][2][0][RTW89_KCC][0][69] = 127, [1][1][2][0][RTW89_ACMA][1][69] = 127, @@ -43931,6 +45467,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][69] = 127, [1][1][2][0][RTW89_UK][1][69] = 127, [1][1][2][0][RTW89_UK][0][69] = 127, + [1][1][2][0][RTW89_THAILAND][1][69] = 127, + [1][1][2][0][RTW89_THAILAND][0][69] = 127, [1][1][2][0][RTW89_FCC][1][73] = 10, [1][1][2][0][RTW89_FCC][2][73] = 66, [1][1][2][0][RTW89_ETSI][1][73] = 127, @@ -43938,6 +45476,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][73] = 127, [1][1][2][0][RTW89_MKK][0][73] = 127, [1][1][2][0][RTW89_IC][1][73] = 10, + [1][1][2][0][RTW89_IC][2][73] = 66, [1][1][2][0][RTW89_KCC][1][73] = 28, [1][1][2][0][RTW89_KCC][0][73] = 127, [1][1][2][0][RTW89_ACMA][1][73] = 127, @@ -43947,6 +45486,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][73] = 127, [1][1][2][0][RTW89_UK][1][73] = 127, [1][1][2][0][RTW89_UK][0][73] = 127, + [1][1][2][0][RTW89_THAILAND][1][73] = 127, + [1][1][2][0][RTW89_THAILAND][0][73] = 127, [1][1][2][0][RTW89_FCC][1][76] = 10, [1][1][2][0][RTW89_FCC][2][76] = 66, [1][1][2][0][RTW89_ETSI][1][76] = 127, @@ -43954,6 +45495,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][76] = 127, [1][1][2][0][RTW89_MKK][0][76] = 127, [1][1][2][0][RTW89_IC][1][76] = 10, + [1][1][2][0][RTW89_IC][2][76] = 66, [1][1][2][0][RTW89_KCC][1][76] = 28, [1][1][2][0][RTW89_KCC][0][76] = 127, [1][1][2][0][RTW89_ACMA][1][76] = 127, @@ -43963,6 +45505,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][76] = 127, [1][1][2][0][RTW89_UK][1][76] = 127, [1][1][2][0][RTW89_UK][0][76] = 127, + [1][1][2][0][RTW89_THAILAND][1][76] = 127, + [1][1][2][0][RTW89_THAILAND][0][76] = 127, [1][1][2][0][RTW89_FCC][1][80] = 10, 
[1][1][2][0][RTW89_FCC][2][80] = 66, [1][1][2][0][RTW89_ETSI][1][80] = 127, @@ -43970,6 +45514,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][80] = 127, [1][1][2][0][RTW89_MKK][0][80] = 127, [1][1][2][0][RTW89_IC][1][80] = 10, + [1][1][2][0][RTW89_IC][2][80] = 66, [1][1][2][0][RTW89_KCC][1][80] = 32, [1][1][2][0][RTW89_KCC][0][80] = 127, [1][1][2][0][RTW89_ACMA][1][80] = 127, @@ -43979,6 +45524,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][80] = 127, [1][1][2][0][RTW89_UK][1][80] = 127, [1][1][2][0][RTW89_UK][0][80] = 127, + [1][1][2][0][RTW89_THAILAND][1][80] = 127, + [1][1][2][0][RTW89_THAILAND][0][80] = 127, [1][1][2][0][RTW89_FCC][1][84] = 10, [1][1][2][0][RTW89_FCC][2][84] = 66, [1][1][2][0][RTW89_ETSI][1][84] = 127, @@ -43986,6 +45533,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][84] = 127, [1][1][2][0][RTW89_MKK][0][84] = 127, [1][1][2][0][RTW89_IC][1][84] = 10, + [1][1][2][0][RTW89_IC][2][84] = 66, [1][1][2][0][RTW89_KCC][1][84] = 32, [1][1][2][0][RTW89_KCC][0][84] = 127, [1][1][2][0][RTW89_ACMA][1][84] = 127, @@ -43995,6 +45543,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][84] = 127, [1][1][2][0][RTW89_UK][1][84] = 127, [1][1][2][0][RTW89_UK][0][84] = 127, + [1][1][2][0][RTW89_THAILAND][1][84] = 127, + [1][1][2][0][RTW89_THAILAND][0][84] = 127, [1][1][2][0][RTW89_FCC][1][88] = 10, [1][1][2][0][RTW89_FCC][2][88] = 127, [1][1][2][0][RTW89_ETSI][1][88] = 127, @@ -44002,6 +45552,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][88] = 127, [1][1][2][0][RTW89_MKK][0][88] = 127, [1][1][2][0][RTW89_IC][1][88] = 10, + [1][1][2][0][RTW89_IC][2][88] = 127, [1][1][2][0][RTW89_KCC][1][88] = 32, [1][1][2][0][RTW89_KCC][0][88] = 127, [1][1][2][0][RTW89_ACMA][1][88] = 127, @@ -44011,6 +45562,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][88] = 127, [1][1][2][0][RTW89_UK][1][88] = 127, [1][1][2][0][RTW89_UK][0][88] = 127, + [1][1][2][0][RTW89_THAILAND][1][88] = 127, + [1][1][2][0][RTW89_THAILAND][0][88] = 127, [1][1][2][0][RTW89_FCC][1][91] = 12, [1][1][2][0][RTW89_FCC][2][91] = 127, [1][1][2][0][RTW89_ETSI][1][91] = 127, @@ -44018,6 +45571,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][91] = 127, [1][1][2][0][RTW89_MKK][0][91] = 127, [1][1][2][0][RTW89_IC][1][91] = 12, + [1][1][2][0][RTW89_IC][2][91] = 127, [1][1][2][0][RTW89_KCC][1][91] = 32, [1][1][2][0][RTW89_KCC][0][91] = 127, [1][1][2][0][RTW89_ACMA][1][91] = 127, @@ -44027,6 +45581,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][91] = 127, [1][1][2][0][RTW89_UK][1][91] = 127, [1][1][2][0][RTW89_UK][0][91] = 127, + [1][1][2][0][RTW89_THAILAND][1][91] = 127, + [1][1][2][0][RTW89_THAILAND][0][91] = 127, [1][1][2][0][RTW89_FCC][1][95] = 10, [1][1][2][0][RTW89_FCC][2][95] = 127, [1][1][2][0][RTW89_ETSI][1][95] = 127, @@ -44034,6 +45590,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][95] = 127, [1][1][2][0][RTW89_MKK][0][95] = 127, [1][1][2][0][RTW89_IC][1][95] = 10, + [1][1][2][0][RTW89_IC][2][95] = 127, [1][1][2][0][RTW89_KCC][1][95] = 32, [1][1][2][0][RTW89_KCC][0][95] = 127, [1][1][2][0][RTW89_ACMA][1][95] = 127, @@ -44043,6 +45600,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][95] = 127, [1][1][2][0][RTW89_UK][1][95] = 127, [1][1][2][0][RTW89_UK][0][95] = 127, + [1][1][2][0][RTW89_THAILAND][1][95] = 127, + [1][1][2][0][RTW89_THAILAND][0][95] = 127, [1][1][2][0][RTW89_FCC][1][99] = 10, [1][1][2][0][RTW89_FCC][2][99] = 127, [1][1][2][0][RTW89_ETSI][1][99] = 127, @@ -44050,6 +45609,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][99] = 127, [1][1][2][0][RTW89_MKK][0][99] = 127, [1][1][2][0][RTW89_IC][1][99] = 10, + [1][1][2][0][RTW89_IC][2][99] = 127, [1][1][2][0][RTW89_KCC][1][99] = 32, [1][1][2][0][RTW89_KCC][0][99] = 127, [1][1][2][0][RTW89_ACMA][1][99] = 127, @@ -44059,6 +45619,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][99] = 127, [1][1][2][0][RTW89_UK][1][99] = 127, [1][1][2][0][RTW89_UK][0][99] = 127, + [1][1][2][0][RTW89_THAILAND][1][99] = 127, + [1][1][2][0][RTW89_THAILAND][0][99] = 127, [1][1][2][0][RTW89_FCC][1][103] = 10, [1][1][2][0][RTW89_FCC][2][103] = 127, [1][1][2][0][RTW89_ETSI][1][103] = 127, @@ -44066,6 +45628,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][103] = 127, [1][1][2][0][RTW89_MKK][0][103] = 127, [1][1][2][0][RTW89_IC][1][103] = 10, + [1][1][2][0][RTW89_IC][2][103] = 127, [1][1][2][0][RTW89_KCC][1][103] = 32, [1][1][2][0][RTW89_KCC][0][103] = 127, [1][1][2][0][RTW89_ACMA][1][103] = 127, @@ -44075,6 +45638,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][103] = 127, [1][1][2][0][RTW89_UK][1][103] = 127, [1][1][2][0][RTW89_UK][0][103] = 127, + [1][1][2][0][RTW89_THAILAND][1][103] = 127, + [1][1][2][0][RTW89_THAILAND][0][103] = 127, [1][1][2][0][RTW89_FCC][1][106] = 12, [1][1][2][0][RTW89_FCC][2][106] = 127, [1][1][2][0][RTW89_ETSI][1][106] = 127, @@ -44082,6 +45647,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][106] = 127, [1][1][2][0][RTW89_MKK][0][106] = 127, [1][1][2][0][RTW89_IC][1][106] = 12, + [1][1][2][0][RTW89_IC][2][106] = 127, [1][1][2][0][RTW89_KCC][1][106] = 32, [1][1][2][0][RTW89_KCC][0][106] = 127, [1][1][2][0][RTW89_ACMA][1][106] = 127, @@ -44091,6 +45657,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][106] = 127, [1][1][2][0][RTW89_UK][1][106] = 127, [1][1][2][0][RTW89_UK][0][106] = 127, + [1][1][2][0][RTW89_THAILAND][1][106] = 127, + [1][1][2][0][RTW89_THAILAND][0][106] = 127, [1][1][2][0][RTW89_FCC][1][110] = 127, [1][1][2][0][RTW89_FCC][2][110] = 127, [1][1][2][0][RTW89_ETSI][1][110] = 127, @@ -44098,6 +45666,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][110] = 127, [1][1][2][0][RTW89_MKK][0][110] = 127, [1][1][2][0][RTW89_IC][1][110] = 127, + [1][1][2][0][RTW89_IC][2][110] = 127, [1][1][2][0][RTW89_KCC][1][110] = 127, [1][1][2][0][RTW89_KCC][0][110] = 127, [1][1][2][0][RTW89_ACMA][1][110] = 127, @@ -44107,6 +45676,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][110] = 127, [1][1][2][0][RTW89_UK][1][110] = 127, [1][1][2][0][RTW89_UK][0][110] = 127, + [1][1][2][0][RTW89_THAILAND][1][110] = 127, + [1][1][2][0][RTW89_THAILAND][0][110] = 127, [1][1][2][0][RTW89_FCC][1][114] = 127, [1][1][2][0][RTW89_FCC][2][114] = 127, [1][1][2][0][RTW89_ETSI][1][114] = 127, @@ -44114,6 +45685,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][114] = 127, [1][1][2][0][RTW89_MKK][0][114] = 127, [1][1][2][0][RTW89_IC][1][114] = 127, + [1][1][2][0][RTW89_IC][2][114] = 127, [1][1][2][0][RTW89_KCC][1][114] = 127, [1][1][2][0][RTW89_KCC][0][114] = 127, [1][1][2][0][RTW89_ACMA][1][114] = 127, @@ -44123,6 +45695,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][114] = 127, [1][1][2][0][RTW89_UK][1][114] = 127, [1][1][2][0][RTW89_UK][0][114] = 127, + [1][1][2][0][RTW89_THAILAND][1][114] = 127, + [1][1][2][0][RTW89_THAILAND][0][114] = 127, [1][1][2][0][RTW89_FCC][1][118] = 127, [1][1][2][0][RTW89_FCC][2][118] = 127, [1][1][2][0][RTW89_ETSI][1][118] = 127, @@ -44130,6 +45704,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][118] = 127, [1][1][2][0][RTW89_MKK][0][118] = 127, [1][1][2][0][RTW89_IC][1][118] = 127, + [1][1][2][0][RTW89_IC][2][118] = 127, [1][1][2][0][RTW89_KCC][1][118] = 127, [1][1][2][0][RTW89_KCC][0][118] = 127, [1][1][2][0][RTW89_ACMA][1][118] = 127, @@ -44139,6 +45714,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][118] = 127, [1][1][2][0][RTW89_UK][1][118] = 127, [1][1][2][0][RTW89_UK][0][118] = 127, + [1][1][2][0][RTW89_THAILAND][1][118] = 127, + [1][1][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][1][RTW89_FCC][1][1] = 10, [1][1][2][1][RTW89_FCC][2][1] = 58, [1][1][2][1][RTW89_ETSI][1][1] = 42, @@ -44146,6 +45723,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][1] = 52, [1][1][2][1][RTW89_MKK][0][1] = 12, [1][1][2][1][RTW89_IC][1][1] = 10, + [1][1][2][1][RTW89_IC][2][1] = 58, [1][1][2][1][RTW89_KCC][1][1] = 28, [1][1][2][1][RTW89_KCC][0][1] = 12, [1][1][2][1][RTW89_ACMA][1][1] = 42, @@ -44155,6 +45733,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][1] = 6, [1][1][2][1][RTW89_UK][1][1] = 42, [1][1][2][1][RTW89_UK][0][1] = 6, + [1][1][2][1][RTW89_THAILAND][1][1] = 46, + [1][1][2][1][RTW89_THAILAND][0][1] = 6, [1][1][2][1][RTW89_FCC][1][5] = 10, [1][1][2][1][RTW89_FCC][2][5] = 58, [1][1][2][1][RTW89_ETSI][1][5] = 42, @@ -44162,6 +45742,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][5] = 52, [1][1][2][1][RTW89_MKK][0][5] = 12, [1][1][2][1][RTW89_IC][1][5] = 10, + [1][1][2][1][RTW89_IC][2][5] = 58, [1][1][2][1][RTW89_KCC][1][5] = 28, [1][1][2][1][RTW89_KCC][0][5] = 12, [1][1][2][1][RTW89_ACMA][1][5] = 42, @@ -44171,6 +45752,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][5] = 6, [1][1][2][1][RTW89_UK][1][5] = 42, [1][1][2][1][RTW89_UK][0][5] = 6, + [1][1][2][1][RTW89_THAILAND][1][5] = 46, + [1][1][2][1][RTW89_THAILAND][0][5] = 6, [1][1][2][1][RTW89_FCC][1][9] = 10, [1][1][2][1][RTW89_FCC][2][9] = 58, [1][1][2][1][RTW89_ETSI][1][9] = 42, @@ -44178,6 +45761,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][9] = 52, [1][1][2][1][RTW89_MKK][0][9] = 12, [1][1][2][1][RTW89_IC][1][9] = 10, + [1][1][2][1][RTW89_IC][2][9] = 58, [1][1][2][1][RTW89_KCC][1][9] = 28, [1][1][2][1][RTW89_KCC][0][9] = 12, [1][1][2][1][RTW89_ACMA][1][9] = 42, @@ -44187,6 +45771,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][9] = 6, [1][1][2][1][RTW89_UK][1][9] = 42, [1][1][2][1][RTW89_UK][0][9] = 6, + 
[1][1][2][1][RTW89_THAILAND][1][9] = 46, + [1][1][2][1][RTW89_THAILAND][0][9] = 6, [1][1][2][1][RTW89_FCC][1][13] = 10, [1][1][2][1][RTW89_FCC][2][13] = 58, [1][1][2][1][RTW89_ETSI][1][13] = 42, @@ -44194,6 +45780,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][13] = 52, [1][1][2][1][RTW89_MKK][0][13] = 12, [1][1][2][1][RTW89_IC][1][13] = 10, + [1][1][2][1][RTW89_IC][2][13] = 58, [1][1][2][1][RTW89_KCC][1][13] = 28, [1][1][2][1][RTW89_KCC][0][13] = 12, [1][1][2][1][RTW89_ACMA][1][13] = 42, @@ -44203,6 +45790,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][13] = 6, [1][1][2][1][RTW89_UK][1][13] = 42, [1][1][2][1][RTW89_UK][0][13] = 6, + [1][1][2][1][RTW89_THAILAND][1][13] = 46, + [1][1][2][1][RTW89_THAILAND][0][13] = 6, [1][1][2][1][RTW89_FCC][1][16] = 10, [1][1][2][1][RTW89_FCC][2][16] = 58, [1][1][2][1][RTW89_ETSI][1][16] = 42, @@ -44210,6 +45799,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][16] = 52, [1][1][2][1][RTW89_MKK][0][16] = 12, [1][1][2][1][RTW89_IC][1][16] = 10, + [1][1][2][1][RTW89_IC][2][16] = 58, [1][1][2][1][RTW89_KCC][1][16] = 28, [1][1][2][1][RTW89_KCC][0][16] = 12, [1][1][2][1][RTW89_ACMA][1][16] = 42, @@ -44219,6 +45809,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][16] = 6, [1][1][2][1][RTW89_UK][1][16] = 42, [1][1][2][1][RTW89_UK][0][16] = 6, + [1][1][2][1][RTW89_THAILAND][1][16] = 46, + [1][1][2][1][RTW89_THAILAND][0][16] = 6, [1][1][2][1][RTW89_FCC][1][20] = 10, [1][1][2][1][RTW89_FCC][2][20] = 58, [1][1][2][1][RTW89_ETSI][1][20] = 42, @@ -44226,6 +45818,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][20] = 52, [1][1][2][1][RTW89_MKK][0][20] = 12, [1][1][2][1][RTW89_IC][1][20] = 10, + [1][1][2][1][RTW89_IC][2][20] = 58, [1][1][2][1][RTW89_KCC][1][20] = 28, [1][1][2][1][RTW89_KCC][0][20] = 12, [1][1][2][1][RTW89_ACMA][1][20] = 42, @@ -44235,6 +45828,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][20] = 6, [1][1][2][1][RTW89_UK][1][20] = 42, [1][1][2][1][RTW89_UK][0][20] = 6, + [1][1][2][1][RTW89_THAILAND][1][20] = 46, + [1][1][2][1][RTW89_THAILAND][0][20] = 6, [1][1][2][1][RTW89_FCC][1][24] = 10, [1][1][2][1][RTW89_FCC][2][24] = 70, [1][1][2][1][RTW89_ETSI][1][24] = 42, @@ -44242,6 +45837,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][24] = 54, [1][1][2][1][RTW89_MKK][0][24] = 14, [1][1][2][1][RTW89_IC][1][24] = 10, + [1][1][2][1][RTW89_IC][2][24] = 70, [1][1][2][1][RTW89_KCC][1][24] = 28, [1][1][2][1][RTW89_KCC][0][24] = 12, [1][1][2][1][RTW89_ACMA][1][24] = 42, @@ -44251,6 +45847,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][24] = 6, [1][1][2][1][RTW89_UK][1][24] = 42, [1][1][2][1][RTW89_UK][0][24] = 6, + [1][1][2][1][RTW89_THAILAND][1][24] = 46, + [1][1][2][1][RTW89_THAILAND][0][24] = 6, [1][1][2][1][RTW89_FCC][1][28] = 10, [1][1][2][1][RTW89_FCC][2][28] = 70, [1][1][2][1][RTW89_ETSI][1][28] = 42, @@ -44258,6 +45856,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][28] = 52, [1][1][2][1][RTW89_MKK][0][28] = 14, [1][1][2][1][RTW89_IC][1][28] = 10, + [1][1][2][1][RTW89_IC][2][28] = 70, [1][1][2][1][RTW89_KCC][1][28] = 28, [1][1][2][1][RTW89_KCC][0][28] = 14, [1][1][2][1][RTW89_ACMA][1][28] = 42, @@ -44267,6 
+45866,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][28] = 6, [1][1][2][1][RTW89_UK][1][28] = 42, [1][1][2][1][RTW89_UK][0][28] = 6, + [1][1][2][1][RTW89_THAILAND][1][28] = 46, + [1][1][2][1][RTW89_THAILAND][0][28] = 6, [1][1][2][1][RTW89_FCC][1][31] = 10, [1][1][2][1][RTW89_FCC][2][31] = 70, [1][1][2][1][RTW89_ETSI][1][31] = 42, @@ -44274,6 +45875,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][31] = 52, [1][1][2][1][RTW89_MKK][0][31] = 14, [1][1][2][1][RTW89_IC][1][31] = 10, + [1][1][2][1][RTW89_IC][2][31] = 70, [1][1][2][1][RTW89_KCC][1][31] = 28, [1][1][2][1][RTW89_KCC][0][31] = 14, [1][1][2][1][RTW89_ACMA][1][31] = 42, @@ -44283,6 +45885,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][31] = 6, [1][1][2][1][RTW89_UK][1][31] = 42, [1][1][2][1][RTW89_UK][0][31] = 6, + [1][1][2][1][RTW89_THAILAND][1][31] = 46, + [1][1][2][1][RTW89_THAILAND][0][31] = 6, [1][1][2][1][RTW89_FCC][1][35] = 10, [1][1][2][1][RTW89_FCC][2][35] = 70, [1][1][2][1][RTW89_ETSI][1][35] = 42, @@ -44290,6 +45894,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][35] = 52, [1][1][2][1][RTW89_MKK][0][35] = 14, [1][1][2][1][RTW89_IC][1][35] = 10, + [1][1][2][1][RTW89_IC][2][35] = 70, [1][1][2][1][RTW89_KCC][1][35] = 28, [1][1][2][1][RTW89_KCC][0][35] = 14, [1][1][2][1][RTW89_ACMA][1][35] = 42, @@ -44299,6 +45904,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][35] = 6, [1][1][2][1][RTW89_UK][1][35] = 42, [1][1][2][1][RTW89_UK][0][35] = 6, + [1][1][2][1][RTW89_THAILAND][1][35] = 46, + [1][1][2][1][RTW89_THAILAND][0][35] = 6, [1][1][2][1][RTW89_FCC][1][39] = 10, [1][1][2][1][RTW89_FCC][2][39] = 70, [1][1][2][1][RTW89_ETSI][1][39] = 42, @@ -44306,6 +45913,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][39] = 52, [1][1][2][1][RTW89_MKK][0][39] = 14, [1][1][2][1][RTW89_IC][1][39] = 10, + [1][1][2][1][RTW89_IC][2][39] = 70, [1][1][2][1][RTW89_KCC][1][39] = 28, [1][1][2][1][RTW89_KCC][0][39] = 14, [1][1][2][1][RTW89_ACMA][1][39] = 42, @@ -44315,6 +45923,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][39] = 6, [1][1][2][1][RTW89_UK][1][39] = 42, [1][1][2][1][RTW89_UK][0][39] = 6, + [1][1][2][1][RTW89_THAILAND][1][39] = 46, + [1][1][2][1][RTW89_THAILAND][0][39] = 6, [1][1][2][1][RTW89_FCC][1][43] = 10, [1][1][2][1][RTW89_FCC][2][43] = 70, [1][1][2][1][RTW89_ETSI][1][43] = 42, @@ -44322,6 +45932,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][43] = 52, [1][1][2][1][RTW89_MKK][0][43] = 14, [1][1][2][1][RTW89_IC][1][43] = 10, + [1][1][2][1][RTW89_IC][2][43] = 70, [1][1][2][1][RTW89_KCC][1][43] = 28, [1][1][2][1][RTW89_KCC][0][43] = 14, [1][1][2][1][RTW89_ACMA][1][43] = 42, @@ -44331,6 +45942,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][43] = 6, [1][1][2][1][RTW89_UK][1][43] = 42, [1][1][2][1][RTW89_UK][0][43] = 6, + [1][1][2][1][RTW89_THAILAND][1][43] = 46, + [1][1][2][1][RTW89_THAILAND][0][43] = 6, [1][1][2][1][RTW89_FCC][1][46] = 12, [1][1][2][1][RTW89_FCC][2][46] = 127, [1][1][2][1][RTW89_ETSI][1][46] = 127, @@ -44338,6 +45951,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][46] = 127, [1][1][2][1][RTW89_MKK][0][46] = 127, 
[1][1][2][1][RTW89_IC][1][46] = 12, + [1][1][2][1][RTW89_IC][2][46] = 68, [1][1][2][1][RTW89_KCC][1][46] = 28, [1][1][2][1][RTW89_KCC][0][46] = 127, [1][1][2][1][RTW89_ACMA][1][46] = 127, @@ -44347,6 +45961,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][46] = 127, [1][1][2][1][RTW89_UK][1][46] = 127, [1][1][2][1][RTW89_UK][0][46] = 127, + [1][1][2][1][RTW89_THAILAND][1][46] = 127, + [1][1][2][1][RTW89_THAILAND][0][46] = 127, [1][1][2][1][RTW89_FCC][1][50] = 12, [1][1][2][1][RTW89_FCC][2][50] = 127, [1][1][2][1][RTW89_ETSI][1][50] = 127, @@ -44354,6 +45970,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][50] = 127, [1][1][2][1][RTW89_MKK][0][50] = 127, [1][1][2][1][RTW89_IC][1][50] = 12, + [1][1][2][1][RTW89_IC][2][50] = 68, [1][1][2][1][RTW89_KCC][1][50] = 28, [1][1][2][1][RTW89_KCC][0][50] = 127, [1][1][2][1][RTW89_ACMA][1][50] = 127, @@ -44363,6 +45980,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][50] = 127, [1][1][2][1][RTW89_UK][1][50] = 127, [1][1][2][1][RTW89_UK][0][50] = 127, + [1][1][2][1][RTW89_THAILAND][1][50] = 127, + [1][1][2][1][RTW89_THAILAND][0][50] = 127, [1][1][2][1][RTW89_FCC][1][54] = 10, [1][1][2][1][RTW89_FCC][2][54] = 127, [1][1][2][1][RTW89_ETSI][1][54] = 127, @@ -44370,6 +45989,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][54] = 127, [1][1][2][1][RTW89_MKK][0][54] = 127, [1][1][2][1][RTW89_IC][1][54] = 10, + [1][1][2][1][RTW89_IC][2][54] = 127, [1][1][2][1][RTW89_KCC][1][54] = 28, [1][1][2][1][RTW89_KCC][0][54] = 127, [1][1][2][1][RTW89_ACMA][1][54] = 127, @@ -44379,6 +45999,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][54] = 127, [1][1][2][1][RTW89_UK][1][54] = 127, [1][1][2][1][RTW89_UK][0][54] = 127, + [1][1][2][1][RTW89_THAILAND][1][54] = 127, + [1][1][2][1][RTW89_THAILAND][0][54] = 127, [1][1][2][1][RTW89_FCC][1][58] = 10, [1][1][2][1][RTW89_FCC][2][58] = 66, [1][1][2][1][RTW89_ETSI][1][58] = 127, @@ -44386,6 +46008,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][58] = 127, [1][1][2][1][RTW89_MKK][0][58] = 127, [1][1][2][1][RTW89_IC][1][58] = 10, + [1][1][2][1][RTW89_IC][2][58] = 66, [1][1][2][1][RTW89_KCC][1][58] = 28, [1][1][2][1][RTW89_KCC][0][58] = 127, [1][1][2][1][RTW89_ACMA][1][58] = 127, @@ -44395,6 +46018,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][58] = 127, [1][1][2][1][RTW89_UK][1][58] = 127, [1][1][2][1][RTW89_UK][0][58] = 127, + [1][1][2][1][RTW89_THAILAND][1][58] = 127, + [1][1][2][1][RTW89_THAILAND][0][58] = 127, [1][1][2][1][RTW89_FCC][1][61] = 10, [1][1][2][1][RTW89_FCC][2][61] = 66, [1][1][2][1][RTW89_ETSI][1][61] = 127, @@ -44402,6 +46027,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][61] = 127, [1][1][2][1][RTW89_MKK][0][61] = 127, [1][1][2][1][RTW89_IC][1][61] = 10, + [1][1][2][1][RTW89_IC][2][61] = 66, [1][1][2][1][RTW89_KCC][1][61] = 28, [1][1][2][1][RTW89_KCC][0][61] = 127, [1][1][2][1][RTW89_ACMA][1][61] = 127, @@ -44411,6 +46037,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][61] = 127, [1][1][2][1][RTW89_UK][1][61] = 127, [1][1][2][1][RTW89_UK][0][61] = 127, + [1][1][2][1][RTW89_THAILAND][1][61] = 127, + [1][1][2][1][RTW89_THAILAND][0][61] = 127, 
[1][1][2][1][RTW89_FCC][1][65] = 10, [1][1][2][1][RTW89_FCC][2][65] = 66, [1][1][2][1][RTW89_ETSI][1][65] = 127, @@ -44418,6 +46046,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][65] = 127, [1][1][2][1][RTW89_MKK][0][65] = 127, [1][1][2][1][RTW89_IC][1][65] = 10, + [1][1][2][1][RTW89_IC][2][65] = 66, [1][1][2][1][RTW89_KCC][1][65] = 28, [1][1][2][1][RTW89_KCC][0][65] = 127, [1][1][2][1][RTW89_ACMA][1][65] = 127, @@ -44427,6 +46056,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][65] = 127, [1][1][2][1][RTW89_UK][1][65] = 127, [1][1][2][1][RTW89_UK][0][65] = 127, + [1][1][2][1][RTW89_THAILAND][1][65] = 127, + [1][1][2][1][RTW89_THAILAND][0][65] = 127, [1][1][2][1][RTW89_FCC][1][69] = 10, [1][1][2][1][RTW89_FCC][2][69] = 66, [1][1][2][1][RTW89_ETSI][1][69] = 127, @@ -44434,6 +46065,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][69] = 127, [1][1][2][1][RTW89_MKK][0][69] = 127, [1][1][2][1][RTW89_IC][1][69] = 10, + [1][1][2][1][RTW89_IC][2][69] = 66, [1][1][2][1][RTW89_KCC][1][69] = 28, [1][1][2][1][RTW89_KCC][0][69] = 127, [1][1][2][1][RTW89_ACMA][1][69] = 127, @@ -44443,6 +46075,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][69] = 127, [1][1][2][1][RTW89_UK][1][69] = 127, [1][1][2][1][RTW89_UK][0][69] = 127, + [1][1][2][1][RTW89_THAILAND][1][69] = 127, + [1][1][2][1][RTW89_THAILAND][0][69] = 127, [1][1][2][1][RTW89_FCC][1][73] = 10, [1][1][2][1][RTW89_FCC][2][73] = 66, [1][1][2][1][RTW89_ETSI][1][73] = 127, @@ -44450,6 +46084,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][73] = 127, [1][1][2][1][RTW89_MKK][0][73] = 127, [1][1][2][1][RTW89_IC][1][73] = 10, + [1][1][2][1][RTW89_IC][2][73] = 66, [1][1][2][1][RTW89_KCC][1][73] = 28, [1][1][2][1][RTW89_KCC][0][73] = 127, [1][1][2][1][RTW89_ACMA][1][73] = 127, @@ -44459,6 +46094,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][73] = 127, [1][1][2][1][RTW89_UK][1][73] = 127, [1][1][2][1][RTW89_UK][0][73] = 127, + [1][1][2][1][RTW89_THAILAND][1][73] = 127, + [1][1][2][1][RTW89_THAILAND][0][73] = 127, [1][1][2][1][RTW89_FCC][1][76] = 10, [1][1][2][1][RTW89_FCC][2][76] = 66, [1][1][2][1][RTW89_ETSI][1][76] = 127, @@ -44466,6 +46103,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][76] = 127, [1][1][2][1][RTW89_MKK][0][76] = 127, [1][1][2][1][RTW89_IC][1][76] = 10, + [1][1][2][1][RTW89_IC][2][76] = 66, [1][1][2][1][RTW89_KCC][1][76] = 28, [1][1][2][1][RTW89_KCC][0][76] = 127, [1][1][2][1][RTW89_ACMA][1][76] = 127, @@ -44475,6 +46113,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][76] = 127, [1][1][2][1][RTW89_UK][1][76] = 127, [1][1][2][1][RTW89_UK][0][76] = 127, + [1][1][2][1][RTW89_THAILAND][1][76] = 127, + [1][1][2][1][RTW89_THAILAND][0][76] = 127, [1][1][2][1][RTW89_FCC][1][80] = 10, [1][1][2][1][RTW89_FCC][2][80] = 66, [1][1][2][1][RTW89_ETSI][1][80] = 127, @@ -44482,6 +46122,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][80] = 127, [1][1][2][1][RTW89_MKK][0][80] = 127, [1][1][2][1][RTW89_IC][1][80] = 10, + [1][1][2][1][RTW89_IC][2][80] = 66, [1][1][2][1][RTW89_KCC][1][80] = 32, [1][1][2][1][RTW89_KCC][0][80] = 127, [1][1][2][1][RTW89_ACMA][1][80] = 127, @@ -44491,6 +46132,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][80] = 127, [1][1][2][1][RTW89_UK][1][80] = 127, [1][1][2][1][RTW89_UK][0][80] = 127, + [1][1][2][1][RTW89_THAILAND][1][80] = 127, + [1][1][2][1][RTW89_THAILAND][0][80] = 127, [1][1][2][1][RTW89_FCC][1][84] = 10, [1][1][2][1][RTW89_FCC][2][84] = 66, [1][1][2][1][RTW89_ETSI][1][84] = 127, @@ -44498,6 +46141,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][84] = 127, [1][1][2][1][RTW89_MKK][0][84] = 127, [1][1][2][1][RTW89_IC][1][84] = 10, + [1][1][2][1][RTW89_IC][2][84] = 66, [1][1][2][1][RTW89_KCC][1][84] = 32, [1][1][2][1][RTW89_KCC][0][84] = 127, [1][1][2][1][RTW89_ACMA][1][84] = 127, @@ -44507,6 +46151,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][84] = 127, [1][1][2][1][RTW89_UK][1][84] = 127, [1][1][2][1][RTW89_UK][0][84] = 127, + [1][1][2][1][RTW89_THAILAND][1][84] = 127, + [1][1][2][1][RTW89_THAILAND][0][84] = 127, [1][1][2][1][RTW89_FCC][1][88] = 10, [1][1][2][1][RTW89_FCC][2][88] = 127, [1][1][2][1][RTW89_ETSI][1][88] = 127, @@ -44514,6 +46160,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][88] = 127, [1][1][2][1][RTW89_MKK][0][88] = 127, [1][1][2][1][RTW89_IC][1][88] = 10, + [1][1][2][1][RTW89_IC][2][88] = 127, [1][1][2][1][RTW89_KCC][1][88] = 32, [1][1][2][1][RTW89_KCC][0][88] = 127, [1][1][2][1][RTW89_ACMA][1][88] = 127, @@ -44523,6 +46170,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][88] = 127, [1][1][2][1][RTW89_UK][1][88] = 127, [1][1][2][1][RTW89_UK][0][88] = 127, + [1][1][2][1][RTW89_THAILAND][1][88] = 127, + [1][1][2][1][RTW89_THAILAND][0][88] = 127, [1][1][2][1][RTW89_FCC][1][91] = 12, [1][1][2][1][RTW89_FCC][2][91] = 127, [1][1][2][1][RTW89_ETSI][1][91] = 127, @@ -44530,6 +46179,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][91] = 127, [1][1][2][1][RTW89_MKK][0][91] = 127, [1][1][2][1][RTW89_IC][1][91] = 12, + [1][1][2][1][RTW89_IC][2][91] = 127, [1][1][2][1][RTW89_KCC][1][91] = 32, [1][1][2][1][RTW89_KCC][0][91] = 127, [1][1][2][1][RTW89_ACMA][1][91] = 127, @@ -44539,6 +46189,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][91] = 127, [1][1][2][1][RTW89_UK][1][91] = 127, [1][1][2][1][RTW89_UK][0][91] = 127, + [1][1][2][1][RTW89_THAILAND][1][91] = 127, + [1][1][2][1][RTW89_THAILAND][0][91] = 127, [1][1][2][1][RTW89_FCC][1][95] = 10, [1][1][2][1][RTW89_FCC][2][95] = 127, [1][1][2][1][RTW89_ETSI][1][95] = 127, @@ -44546,6 +46198,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][95] = 127, [1][1][2][1][RTW89_MKK][0][95] = 127, [1][1][2][1][RTW89_IC][1][95] = 10, + [1][1][2][1][RTW89_IC][2][95] = 127, [1][1][2][1][RTW89_KCC][1][95] = 32, [1][1][2][1][RTW89_KCC][0][95] = 127, [1][1][2][1][RTW89_ACMA][1][95] = 127, @@ -44555,6 +46208,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][95] = 127, [1][1][2][1][RTW89_UK][1][95] = 127, [1][1][2][1][RTW89_UK][0][95] = 127, + [1][1][2][1][RTW89_THAILAND][1][95] = 127, + [1][1][2][1][RTW89_THAILAND][0][95] = 127, [1][1][2][1][RTW89_FCC][1][99] = 10, [1][1][2][1][RTW89_FCC][2][99] = 127, [1][1][2][1][RTW89_ETSI][1][99] = 127, @@ -44562,6 +46217,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][99] = 127, 
[1][1][2][1][RTW89_MKK][0][99] = 127, [1][1][2][1][RTW89_IC][1][99] = 10, + [1][1][2][1][RTW89_IC][2][99] = 127, [1][1][2][1][RTW89_KCC][1][99] = 32, [1][1][2][1][RTW89_KCC][0][99] = 127, [1][1][2][1][RTW89_ACMA][1][99] = 127, @@ -44571,6 +46227,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][99] = 127, [1][1][2][1][RTW89_UK][1][99] = 127, [1][1][2][1][RTW89_UK][0][99] = 127, + [1][1][2][1][RTW89_THAILAND][1][99] = 127, + [1][1][2][1][RTW89_THAILAND][0][99] = 127, [1][1][2][1][RTW89_FCC][1][103] = 10, [1][1][2][1][RTW89_FCC][2][103] = 127, [1][1][2][1][RTW89_ETSI][1][103] = 127, @@ -44578,6 +46236,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][103] = 127, [1][1][2][1][RTW89_MKK][0][103] = 127, [1][1][2][1][RTW89_IC][1][103] = 10, + [1][1][2][1][RTW89_IC][2][103] = 127, [1][1][2][1][RTW89_KCC][1][103] = 32, [1][1][2][1][RTW89_KCC][0][103] = 127, [1][1][2][1][RTW89_ACMA][1][103] = 127, @@ -44587,6 +46246,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][103] = 127, [1][1][2][1][RTW89_UK][1][103] = 127, [1][1][2][1][RTW89_UK][0][103] = 127, + [1][1][2][1][RTW89_THAILAND][1][103] = 127, + [1][1][2][1][RTW89_THAILAND][0][103] = 127, [1][1][2][1][RTW89_FCC][1][106] = 12, [1][1][2][1][RTW89_FCC][2][106] = 127, [1][1][2][1][RTW89_ETSI][1][106] = 127, @@ -44594,6 +46255,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][106] = 127, [1][1][2][1][RTW89_MKK][0][106] = 127, [1][1][2][1][RTW89_IC][1][106] = 12, + [1][1][2][1][RTW89_IC][2][106] = 127, [1][1][2][1][RTW89_KCC][1][106] = 32, [1][1][2][1][RTW89_KCC][0][106] = 127, [1][1][2][1][RTW89_ACMA][1][106] = 127, @@ -44603,6 +46265,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][106] = 127, [1][1][2][1][RTW89_UK][1][106] = 127, [1][1][2][1][RTW89_UK][0][106] = 127, + [1][1][2][1][RTW89_THAILAND][1][106] = 127, + [1][1][2][1][RTW89_THAILAND][0][106] = 127, [1][1][2][1][RTW89_FCC][1][110] = 127, [1][1][2][1][RTW89_FCC][2][110] = 127, [1][1][2][1][RTW89_ETSI][1][110] = 127, @@ -44610,6 +46274,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][110] = 127, [1][1][2][1][RTW89_MKK][0][110] = 127, [1][1][2][1][RTW89_IC][1][110] = 127, + [1][1][2][1][RTW89_IC][2][110] = 127, [1][1][2][1][RTW89_KCC][1][110] = 127, [1][1][2][1][RTW89_KCC][0][110] = 127, [1][1][2][1][RTW89_ACMA][1][110] = 127, @@ -44619,6 +46284,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][110] = 127, [1][1][2][1][RTW89_UK][1][110] = 127, [1][1][2][1][RTW89_UK][0][110] = 127, + [1][1][2][1][RTW89_THAILAND][1][110] = 127, + [1][1][2][1][RTW89_THAILAND][0][110] = 127, [1][1][2][1][RTW89_FCC][1][114] = 127, [1][1][2][1][RTW89_FCC][2][114] = 127, [1][1][2][1][RTW89_ETSI][1][114] = 127, @@ -44626,6 +46293,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][114] = 127, [1][1][2][1][RTW89_MKK][0][114] = 127, [1][1][2][1][RTW89_IC][1][114] = 127, + [1][1][2][1][RTW89_IC][2][114] = 127, [1][1][2][1][RTW89_KCC][1][114] = 127, [1][1][2][1][RTW89_KCC][0][114] = 127, [1][1][2][1][RTW89_ACMA][1][114] = 127, @@ -44635,6 +46303,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][114] = 127, [1][1][2][1][RTW89_UK][1][114] = 127, [1][1][2][1][RTW89_UK][0][114] = 127, + 
[1][1][2][1][RTW89_THAILAND][1][114] = 127, + [1][1][2][1][RTW89_THAILAND][0][114] = 127, [1][1][2][1][RTW89_FCC][1][118] = 127, [1][1][2][1][RTW89_FCC][2][118] = 127, [1][1][2][1][RTW89_ETSI][1][118] = 127, @@ -44642,6 +46312,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][118] = 127, [1][1][2][1][RTW89_MKK][0][118] = 127, [1][1][2][1][RTW89_IC][1][118] = 127, + [1][1][2][1][RTW89_IC][2][118] = 127, [1][1][2][1][RTW89_KCC][1][118] = 127, [1][1][2][1][RTW89_KCC][0][118] = 127, [1][1][2][1][RTW89_ACMA][1][118] = 127, @@ -44651,6 +46322,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][118] = 127, [1][1][2][1][RTW89_UK][1][118] = 127, [1][1][2][1][RTW89_UK][0][118] = 127, + [1][1][2][1][RTW89_THAILAND][1][118] = 127, + [1][1][2][1][RTW89_THAILAND][0][118] = 127, [2][0][2][0][RTW89_FCC][1][3] = 46, [2][0][2][0][RTW89_FCC][2][3] = 60, [2][0][2][0][RTW89_ETSI][1][3] = 58, @@ -44658,6 +46331,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][3] = 58, [2][0][2][0][RTW89_MKK][0][3] = 26, [2][0][2][0][RTW89_IC][1][3] = 46, + [2][0][2][0][RTW89_IC][2][3] = 60, [2][0][2][0][RTW89_KCC][1][3] = 50, [2][0][2][0][RTW89_KCC][0][3] = 24, [2][0][2][0][RTW89_ACMA][1][3] = 58, @@ -44667,6 +46341,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][3] = 30, [2][0][2][0][RTW89_UK][1][3] = 58, [2][0][2][0][RTW89_UK][0][3] = 30, + [2][0][2][0][RTW89_THAILAND][1][3] = 58, + [2][0][2][0][RTW89_THAILAND][0][3] = 30, [2][0][2][0][RTW89_FCC][1][11] = 46, [2][0][2][0][RTW89_FCC][2][11] = 60, [2][0][2][0][RTW89_ETSI][1][11] = 58, @@ -44674,6 +46350,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][11] = 58, [2][0][2][0][RTW89_MKK][0][11] = 24, [2][0][2][0][RTW89_IC][1][11] = 46, + [2][0][2][0][RTW89_IC][2][11] = 60, [2][0][2][0][RTW89_KCC][1][11] = 50, [2][0][2][0][RTW89_KCC][0][11] = 24, [2][0][2][0][RTW89_ACMA][1][11] = 58, @@ -44683,6 +46360,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][11] = 30, [2][0][2][0][RTW89_UK][1][11] = 58, [2][0][2][0][RTW89_UK][0][11] = 30, + [2][0][2][0][RTW89_THAILAND][1][11] = 58, + [2][0][2][0][RTW89_THAILAND][0][11] = 30, [2][0][2][0][RTW89_FCC][1][18] = 46, [2][0][2][0][RTW89_FCC][2][18] = 60, [2][0][2][0][RTW89_ETSI][1][18] = 58, @@ -44690,6 +46369,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][18] = 58, [2][0][2][0][RTW89_MKK][0][18] = 24, [2][0][2][0][RTW89_IC][1][18] = 46, + [2][0][2][0][RTW89_IC][2][18] = 60, [2][0][2][0][RTW89_KCC][1][18] = 50, [2][0][2][0][RTW89_KCC][0][18] = 24, [2][0][2][0][RTW89_ACMA][1][18] = 58, @@ -44699,6 +46379,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][18] = 30, [2][0][2][0][RTW89_UK][1][18] = 58, [2][0][2][0][RTW89_UK][0][18] = 30, + [2][0][2][0][RTW89_THAILAND][1][18] = 58, + [2][0][2][0][RTW89_THAILAND][0][18] = 30, [2][0][2][0][RTW89_FCC][1][26] = 46, [2][0][2][0][RTW89_FCC][2][26] = 60, [2][0][2][0][RTW89_ETSI][1][26] = 58, @@ -44706,6 +46388,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][26] = 58, [2][0][2][0][RTW89_MKK][0][26] = 24, [2][0][2][0][RTW89_IC][1][26] = 46, + [2][0][2][0][RTW89_IC][2][26] = 60, [2][0][2][0][RTW89_KCC][1][26] = 50, [2][0][2][0][RTW89_KCC][0][26] = 26, 
[2][0][2][0][RTW89_ACMA][1][26] = 58, @@ -44715,6 +46398,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][26] = 30, [2][0][2][0][RTW89_UK][1][26] = 58, [2][0][2][0][RTW89_UK][0][26] = 30, + [2][0][2][0][RTW89_THAILAND][1][26] = 58, + [2][0][2][0][RTW89_THAILAND][0][26] = 30, [2][0][2][0][RTW89_FCC][1][33] = 46, [2][0][2][0][RTW89_FCC][2][33] = 60, [2][0][2][0][RTW89_ETSI][1][33] = 58, @@ -44722,6 +46407,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][33] = 58, [2][0][2][0][RTW89_MKK][0][33] = 24, [2][0][2][0][RTW89_IC][1][33] = 46, + [2][0][2][0][RTW89_IC][2][33] = 60, [2][0][2][0][RTW89_KCC][1][33] = 50, [2][0][2][0][RTW89_KCC][0][33] = 24, [2][0][2][0][RTW89_ACMA][1][33] = 58, @@ -44731,6 +46417,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][33] = 30, [2][0][2][0][RTW89_UK][1][33] = 58, [2][0][2][0][RTW89_UK][0][33] = 30, + [2][0][2][0][RTW89_THAILAND][1][33] = 58, + [2][0][2][0][RTW89_THAILAND][0][33] = 30, [2][0][2][0][RTW89_FCC][1][41] = 46, [2][0][2][0][RTW89_FCC][2][41] = 60, [2][0][2][0][RTW89_ETSI][1][41] = 58, @@ -44738,6 +46426,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][41] = 58, [2][0][2][0][RTW89_MKK][0][41] = 24, [2][0][2][0][RTW89_IC][1][41] = 46, + [2][0][2][0][RTW89_IC][2][41] = 60, [2][0][2][0][RTW89_KCC][1][41] = 50, [2][0][2][0][RTW89_KCC][0][41] = 24, [2][0][2][0][RTW89_ACMA][1][41] = 58, @@ -44747,6 +46436,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][41] = 30, [2][0][2][0][RTW89_UK][1][41] = 58, [2][0][2][0][RTW89_UK][0][41] = 30, + [2][0][2][0][RTW89_THAILAND][1][41] = 58, + [2][0][2][0][RTW89_THAILAND][0][41] = 30, [2][0][2][0][RTW89_FCC][1][48] = 46, [2][0][2][0][RTW89_FCC][2][48] = 127, [2][0][2][0][RTW89_ETSI][1][48] = 127, @@ -44754,6 +46445,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][48] = 127, [2][0][2][0][RTW89_MKK][0][48] = 127, [2][0][2][0][RTW89_IC][1][48] = 46, + [2][0][2][0][RTW89_IC][2][48] = 60, [2][0][2][0][RTW89_KCC][1][48] = 48, [2][0][2][0][RTW89_KCC][0][48] = 127, [2][0][2][0][RTW89_ACMA][1][48] = 127, @@ -44763,6 +46455,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][48] = 127, [2][0][2][0][RTW89_UK][1][48] = 127, [2][0][2][0][RTW89_UK][0][48] = 127, + [2][0][2][0][RTW89_THAILAND][1][48] = 127, + [2][0][2][0][RTW89_THAILAND][0][48] = 127, [2][0][2][0][RTW89_FCC][1][56] = 46, [2][0][2][0][RTW89_FCC][2][56] = 127, [2][0][2][0][RTW89_ETSI][1][56] = 127, @@ -44770,6 +46464,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][56] = 127, [2][0][2][0][RTW89_MKK][0][56] = 127, [2][0][2][0][RTW89_IC][1][56] = 46, + [2][0][2][0][RTW89_IC][2][56] = 58, [2][0][2][0][RTW89_KCC][1][56] = 48, [2][0][2][0][RTW89_KCC][0][56] = 127, [2][0][2][0][RTW89_ACMA][1][56] = 127, @@ -44779,6 +46474,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][56] = 127, [2][0][2][0][RTW89_UK][1][56] = 127, [2][0][2][0][RTW89_UK][0][56] = 127, + [2][0][2][0][RTW89_THAILAND][1][56] = 127, + [2][0][2][0][RTW89_THAILAND][0][56] = 127, [2][0][2][0][RTW89_FCC][1][63] = 46, [2][0][2][0][RTW89_FCC][2][63] = 58, [2][0][2][0][RTW89_ETSI][1][63] = 127, @@ -44786,6 +46483,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][63] = 127, [2][0][2][0][RTW89_MKK][0][63] = 127, [2][0][2][0][RTW89_IC][1][63] = 46, + [2][0][2][0][RTW89_IC][2][63] = 58, [2][0][2][0][RTW89_KCC][1][63] = 48, [2][0][2][0][RTW89_KCC][0][63] = 127, [2][0][2][0][RTW89_ACMA][1][63] = 127, @@ -44795,6 +46493,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][63] = 127, [2][0][2][0][RTW89_UK][1][63] = 127, [2][0][2][0][RTW89_UK][0][63] = 127, + [2][0][2][0][RTW89_THAILAND][1][63] = 127, + [2][0][2][0][RTW89_THAILAND][0][63] = 127, [2][0][2][0][RTW89_FCC][1][71] = 46, [2][0][2][0][RTW89_FCC][2][71] = 58, [2][0][2][0][RTW89_ETSI][1][71] = 127, @@ -44802,6 +46502,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][71] = 127, [2][0][2][0][RTW89_MKK][0][71] = 127, [2][0][2][0][RTW89_IC][1][71] = 46, + [2][0][2][0][RTW89_IC][2][71] = 58, [2][0][2][0][RTW89_KCC][1][71] = 48, [2][0][2][0][RTW89_KCC][0][71] = 127, [2][0][2][0][RTW89_ACMA][1][71] = 127, @@ -44811,6 +46512,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][71] = 127, [2][0][2][0][RTW89_UK][1][71] = 127, [2][0][2][0][RTW89_UK][0][71] = 127, + [2][0][2][0][RTW89_THAILAND][1][71] = 127, + [2][0][2][0][RTW89_THAILAND][0][71] = 127, [2][0][2][0][RTW89_FCC][1][78] = 46, [2][0][2][0][RTW89_FCC][2][78] = 58, [2][0][2][0][RTW89_ETSI][1][78] = 127, @@ -44818,6 +46521,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][78] = 127, [2][0][2][0][RTW89_MKK][0][78] = 127, [2][0][2][0][RTW89_IC][1][78] = 46, + [2][0][2][0][RTW89_IC][2][78] = 58, [2][0][2][0][RTW89_KCC][1][78] = 52, [2][0][2][0][RTW89_KCC][0][78] = 127, [2][0][2][0][RTW89_ACMA][1][78] = 127, @@ -44827,6 +46531,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][78] = 127, [2][0][2][0][RTW89_UK][1][78] = 127, [2][0][2][0][RTW89_UK][0][78] = 127, + [2][0][2][0][RTW89_THAILAND][1][78] = 127, + [2][0][2][0][RTW89_THAILAND][0][78] = 127, [2][0][2][0][RTW89_FCC][1][86] = 46, [2][0][2][0][RTW89_FCC][2][86] = 127, [2][0][2][0][RTW89_ETSI][1][86] = 127, @@ -44834,6 +46540,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][86] = 127, [2][0][2][0][RTW89_MKK][0][86] = 127, [2][0][2][0][RTW89_IC][1][86] = 46, + [2][0][2][0][RTW89_IC][2][86] = 127, [2][0][2][0][RTW89_KCC][1][86] = 52, [2][0][2][0][RTW89_KCC][0][86] = 127, [2][0][2][0][RTW89_ACMA][1][86] = 127, @@ -44843,6 +46550,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][86] = 127, [2][0][2][0][RTW89_UK][1][86] = 127, [2][0][2][0][RTW89_UK][0][86] = 127, + [2][0][2][0][RTW89_THAILAND][1][86] = 127, + [2][0][2][0][RTW89_THAILAND][0][86] = 127, [2][0][2][0][RTW89_FCC][1][93] = 46, [2][0][2][0][RTW89_FCC][2][93] = 127, [2][0][2][0][RTW89_ETSI][1][93] = 127, @@ -44850,6 +46559,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][93] = 127, [2][0][2][0][RTW89_MKK][0][93] = 127, [2][0][2][0][RTW89_IC][1][93] = 46, + [2][0][2][0][RTW89_IC][2][93] = 127, [2][0][2][0][RTW89_KCC][1][93] = 50, [2][0][2][0][RTW89_KCC][0][93] = 127, [2][0][2][0][RTW89_ACMA][1][93] = 127, @@ -44859,6 +46569,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][93] = 127, [2][0][2][0][RTW89_UK][1][93] = 127, 
[2][0][2][0][RTW89_UK][0][93] = 127, + [2][0][2][0][RTW89_THAILAND][1][93] = 127, + [2][0][2][0][RTW89_THAILAND][0][93] = 127, [2][0][2][0][RTW89_FCC][1][101] = 44, [2][0][2][0][RTW89_FCC][2][101] = 127, [2][0][2][0][RTW89_ETSI][1][101] = 127, @@ -44866,6 +46578,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][101] = 127, [2][0][2][0][RTW89_MKK][0][101] = 127, [2][0][2][0][RTW89_IC][1][101] = 44, + [2][0][2][0][RTW89_IC][2][101] = 127, [2][0][2][0][RTW89_KCC][1][101] = 50, [2][0][2][0][RTW89_KCC][0][101] = 127, [2][0][2][0][RTW89_ACMA][1][101] = 127, @@ -44875,6 +46588,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][101] = 127, [2][0][2][0][RTW89_UK][1][101] = 127, [2][0][2][0][RTW89_UK][0][101] = 127, + [2][0][2][0][RTW89_THAILAND][1][101] = 127, + [2][0][2][0][RTW89_THAILAND][0][101] = 127, [2][0][2][0][RTW89_FCC][1][108] = 127, [2][0][2][0][RTW89_FCC][2][108] = 127, [2][0][2][0][RTW89_ETSI][1][108] = 127, @@ -44882,6 +46597,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][108] = 127, [2][0][2][0][RTW89_MKK][0][108] = 127, [2][0][2][0][RTW89_IC][1][108] = 127, + [2][0][2][0][RTW89_IC][2][108] = 127, [2][0][2][0][RTW89_KCC][1][108] = 127, [2][0][2][0][RTW89_KCC][0][108] = 127, [2][0][2][0][RTW89_ACMA][1][108] = 127, @@ -44891,6 +46607,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][108] = 127, [2][0][2][0][RTW89_UK][1][108] = 127, [2][0][2][0][RTW89_UK][0][108] = 127, + [2][0][2][0][RTW89_THAILAND][1][108] = 127, + [2][0][2][0][RTW89_THAILAND][0][108] = 127, [2][0][2][0][RTW89_FCC][1][116] = 127, [2][0][2][0][RTW89_FCC][2][116] = 127, [2][0][2][0][RTW89_ETSI][1][116] = 127, @@ -44898,6 +46616,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][116] = 127, [2][0][2][0][RTW89_MKK][0][116] = 127, [2][0][2][0][RTW89_IC][1][116] = 127, + [2][0][2][0][RTW89_IC][2][116] = 127, [2][0][2][0][RTW89_KCC][1][116] = 127, [2][0][2][0][RTW89_KCC][0][116] = 127, [2][0][2][0][RTW89_ACMA][1][116] = 127, @@ -44907,6 +46626,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][116] = 127, [2][0][2][0][RTW89_UK][1][116] = 127, [2][0][2][0][RTW89_UK][0][116] = 127, + [2][0][2][0][RTW89_THAILAND][1][116] = 127, + [2][0][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][0][RTW89_FCC][1][3] = 22, [2][1][2][0][RTW89_FCC][2][3] = 50, [2][1][2][0][RTW89_ETSI][1][3] = 54, @@ -44914,6 +46635,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][3] = 52, [2][1][2][0][RTW89_MKK][0][3] = 14, [2][1][2][0][RTW89_IC][1][3] = 22, + [2][1][2][0][RTW89_IC][2][3] = 50, [2][1][2][0][RTW89_KCC][1][3] = 38, [2][1][2][0][RTW89_KCC][0][3] = 12, [2][1][2][0][RTW89_ACMA][1][3] = 54, @@ -44923,6 +46645,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][3] = 16, [2][1][2][0][RTW89_UK][1][3] = 54, [2][1][2][0][RTW89_UK][0][3] = 16, + [2][1][2][0][RTW89_THAILAND][1][3] = 46, + [2][1][2][0][RTW89_THAILAND][0][3] = 18, [2][1][2][0][RTW89_FCC][1][11] = 20, [2][1][2][0][RTW89_FCC][2][11] = 50, [2][1][2][0][RTW89_ETSI][1][11] = 54, @@ -44930,6 +46654,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][11] = 52, [2][1][2][0][RTW89_MKK][0][11] = 12, [2][1][2][0][RTW89_IC][1][11] = 20, + [2][1][2][0][RTW89_IC][2][11] = 50, 
[2][1][2][0][RTW89_KCC][1][11] = 38, [2][1][2][0][RTW89_KCC][0][11] = 12, [2][1][2][0][RTW89_ACMA][1][11] = 54, @@ -44939,6 +46664,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][11] = 16, [2][1][2][0][RTW89_UK][1][11] = 54, [2][1][2][0][RTW89_UK][0][11] = 16, + [2][1][2][0][RTW89_THAILAND][1][11] = 46, + [2][1][2][0][RTW89_THAILAND][0][11] = 18, [2][1][2][0][RTW89_FCC][1][18] = 20, [2][1][2][0][RTW89_FCC][2][18] = 50, [2][1][2][0][RTW89_ETSI][1][18] = 54, @@ -44946,6 +46673,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][18] = 52, [2][1][2][0][RTW89_MKK][0][18] = 12, [2][1][2][0][RTW89_IC][1][18] = 20, + [2][1][2][0][RTW89_IC][2][18] = 50, [2][1][2][0][RTW89_KCC][1][18] = 38, [2][1][2][0][RTW89_KCC][0][18] = 12, [2][1][2][0][RTW89_ACMA][1][18] = 54, @@ -44955,6 +46683,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][18] = 16, [2][1][2][0][RTW89_UK][1][18] = 54, [2][1][2][0][RTW89_UK][0][18] = 16, + [2][1][2][0][RTW89_THAILAND][1][18] = 46, + [2][1][2][0][RTW89_THAILAND][0][18] = 18, [2][1][2][0][RTW89_FCC][1][26] = 20, [2][1][2][0][RTW89_FCC][2][26] = 60, [2][1][2][0][RTW89_ETSI][1][26] = 54, @@ -44962,6 +46692,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][26] = 52, [2][1][2][0][RTW89_MKK][0][26] = 12, [2][1][2][0][RTW89_IC][1][26] = 20, + [2][1][2][0][RTW89_IC][2][26] = 60, [2][1][2][0][RTW89_KCC][1][26] = 38, [2][1][2][0][RTW89_KCC][0][26] = 12, [2][1][2][0][RTW89_ACMA][1][26] = 54, @@ -44971,6 +46702,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][26] = 16, [2][1][2][0][RTW89_UK][1][26] = 54, [2][1][2][0][RTW89_UK][0][26] = 16, + [2][1][2][0][RTW89_THAILAND][1][26] = 46, + [2][1][2][0][RTW89_THAILAND][0][26] = 18, [2][1][2][0][RTW89_FCC][1][33] = 20, [2][1][2][0][RTW89_FCC][2][33] = 60, [2][1][2][0][RTW89_ETSI][1][33] = 54, @@ -44978,6 +46711,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][33] = 48, [2][1][2][0][RTW89_MKK][0][33] = 12, [2][1][2][0][RTW89_IC][1][33] = 20, + [2][1][2][0][RTW89_IC][2][33] = 60, [2][1][2][0][RTW89_KCC][1][33] = 38, [2][1][2][0][RTW89_KCC][0][33] = 12, [2][1][2][0][RTW89_ACMA][1][33] = 54, @@ -44987,6 +46721,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][33] = 16, [2][1][2][0][RTW89_UK][1][33] = 54, [2][1][2][0][RTW89_UK][0][33] = 16, + [2][1][2][0][RTW89_THAILAND][1][33] = 46, + [2][1][2][0][RTW89_THAILAND][0][33] = 18, [2][1][2][0][RTW89_FCC][1][41] = 22, [2][1][2][0][RTW89_FCC][2][41] = 60, [2][1][2][0][RTW89_ETSI][1][41] = 54, @@ -44994,6 +46730,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][41] = 48, [2][1][2][0][RTW89_MKK][0][41] = 12, [2][1][2][0][RTW89_IC][1][41] = 22, + [2][1][2][0][RTW89_IC][2][41] = 60, [2][1][2][0][RTW89_KCC][1][41] = 38, [2][1][2][0][RTW89_KCC][0][41] = 12, [2][1][2][0][RTW89_ACMA][1][41] = 54, @@ -45003,6 +46740,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][41] = 18, [2][1][2][0][RTW89_UK][1][41] = 54, [2][1][2][0][RTW89_UK][0][41] = 18, + [2][1][2][0][RTW89_THAILAND][1][41] = 46, + [2][1][2][0][RTW89_THAILAND][0][41] = 18, [2][1][2][0][RTW89_FCC][1][48] = 22, [2][1][2][0][RTW89_FCC][2][48] = 127, [2][1][2][0][RTW89_ETSI][1][48] = 127, @@ -45010,6 +46749,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][48] = 127, [2][1][2][0][RTW89_MKK][0][48] = 127, [2][1][2][0][RTW89_IC][1][48] = 22, + [2][1][2][0][RTW89_IC][2][48] = 60, [2][1][2][0][RTW89_KCC][1][48] = 38, [2][1][2][0][RTW89_KCC][0][48] = 127, [2][1][2][0][RTW89_ACMA][1][48] = 127, @@ -45019,6 +46759,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][48] = 127, [2][1][2][0][RTW89_UK][1][48] = 127, [2][1][2][0][RTW89_UK][0][48] = 127, + [2][1][2][0][RTW89_THAILAND][1][48] = 127, + [2][1][2][0][RTW89_THAILAND][0][48] = 127, [2][1][2][0][RTW89_FCC][1][56] = 20, [2][1][2][0][RTW89_FCC][2][56] = 127, [2][1][2][0][RTW89_ETSI][1][56] = 127, @@ -45026,6 +46768,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][56] = 127, [2][1][2][0][RTW89_MKK][0][56] = 127, [2][1][2][0][RTW89_IC][1][56] = 20, + [2][1][2][0][RTW89_IC][2][56] = 56, [2][1][2][0][RTW89_KCC][1][56] = 38, [2][1][2][0][RTW89_KCC][0][56] = 127, [2][1][2][0][RTW89_ACMA][1][56] = 127, @@ -45035,6 +46778,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][56] = 127, [2][1][2][0][RTW89_UK][1][56] = 127, [2][1][2][0][RTW89_UK][0][56] = 127, + [2][1][2][0][RTW89_THAILAND][1][56] = 127, + [2][1][2][0][RTW89_THAILAND][0][56] = 127, [2][1][2][0][RTW89_FCC][1][63] = 22, [2][1][2][0][RTW89_FCC][2][63] = 58, [2][1][2][0][RTW89_ETSI][1][63] = 127, @@ -45042,6 +46787,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][63] = 127, [2][1][2][0][RTW89_MKK][0][63] = 127, [2][1][2][0][RTW89_IC][1][63] = 22, + [2][1][2][0][RTW89_IC][2][63] = 58, [2][1][2][0][RTW89_KCC][1][63] = 38, [2][1][2][0][RTW89_KCC][0][63] = 127, [2][1][2][0][RTW89_ACMA][1][63] = 127, @@ -45051,6 +46797,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][63] = 127, [2][1][2][0][RTW89_UK][1][63] = 127, [2][1][2][0][RTW89_UK][0][63] = 127, + [2][1][2][0][RTW89_THAILAND][1][63] = 127, + [2][1][2][0][RTW89_THAILAND][0][63] = 127, [2][1][2][0][RTW89_FCC][1][71] = 20, [2][1][2][0][RTW89_FCC][2][71] = 58, [2][1][2][0][RTW89_ETSI][1][71] = 127, @@ -45058,6 +46806,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][71] = 127, [2][1][2][0][RTW89_MKK][0][71] = 127, [2][1][2][0][RTW89_IC][1][71] = 20, + [2][1][2][0][RTW89_IC][2][71] = 58, [2][1][2][0][RTW89_KCC][1][71] = 38, [2][1][2][0][RTW89_KCC][0][71] = 127, [2][1][2][0][RTW89_ACMA][1][71] = 127, @@ -45067,6 +46816,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][71] = 127, [2][1][2][0][RTW89_UK][1][71] = 127, [2][1][2][0][RTW89_UK][0][71] = 127, + [2][1][2][0][RTW89_THAILAND][1][71] = 127, + [2][1][2][0][RTW89_THAILAND][0][71] = 127, [2][1][2][0][RTW89_FCC][1][78] = 20, [2][1][2][0][RTW89_FCC][2][78] = 58, [2][1][2][0][RTW89_ETSI][1][78] = 127, @@ -45074,6 +46825,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][78] = 127, [2][1][2][0][RTW89_MKK][0][78] = 127, [2][1][2][0][RTW89_IC][1][78] = 20, + [2][1][2][0][RTW89_IC][2][78] = 58, [2][1][2][0][RTW89_KCC][1][78] = 38, [2][1][2][0][RTW89_KCC][0][78] = 127, [2][1][2][0][RTW89_ACMA][1][78] = 127, @@ -45083,6 +46835,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][78] = 127, [2][1][2][0][RTW89_UK][1][78] = 127, 
[2][1][2][0][RTW89_UK][0][78] = 127, + [2][1][2][0][RTW89_THAILAND][1][78] = 127, + [2][1][2][0][RTW89_THAILAND][0][78] = 127, [2][1][2][0][RTW89_FCC][1][86] = 20, [2][1][2][0][RTW89_FCC][2][86] = 127, [2][1][2][0][RTW89_ETSI][1][86] = 127, @@ -45090,6 +46844,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][86] = 127, [2][1][2][0][RTW89_MKK][0][86] = 127, [2][1][2][0][RTW89_IC][1][86] = 20, + [2][1][2][0][RTW89_IC][2][86] = 127, [2][1][2][0][RTW89_KCC][1][86] = 38, [2][1][2][0][RTW89_KCC][0][86] = 127, [2][1][2][0][RTW89_ACMA][1][86] = 127, @@ -45099,6 +46854,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][86] = 127, [2][1][2][0][RTW89_UK][1][86] = 127, [2][1][2][0][RTW89_UK][0][86] = 127, + [2][1][2][0][RTW89_THAILAND][1][86] = 127, + [2][1][2][0][RTW89_THAILAND][0][86] = 127, [2][1][2][0][RTW89_FCC][1][93] = 22, [2][1][2][0][RTW89_FCC][2][93] = 127, [2][1][2][0][RTW89_ETSI][1][93] = 127, @@ -45106,6 +46863,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][93] = 127, [2][1][2][0][RTW89_MKK][0][93] = 127, [2][1][2][0][RTW89_IC][1][93] = 22, + [2][1][2][0][RTW89_IC][2][93] = 127, [2][1][2][0][RTW89_KCC][1][93] = 38, [2][1][2][0][RTW89_KCC][0][93] = 127, [2][1][2][0][RTW89_ACMA][1][93] = 127, @@ -45115,6 +46873,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][93] = 127, [2][1][2][0][RTW89_UK][1][93] = 127, [2][1][2][0][RTW89_UK][0][93] = 127, + [2][1][2][0][RTW89_THAILAND][1][93] = 127, + [2][1][2][0][RTW89_THAILAND][0][93] = 127, [2][1][2][0][RTW89_FCC][1][101] = 22, [2][1][2][0][RTW89_FCC][2][101] = 127, [2][1][2][0][RTW89_ETSI][1][101] = 127, @@ -45122,6 +46882,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][101] = 127, [2][1][2][0][RTW89_MKK][0][101] = 127, [2][1][2][0][RTW89_IC][1][101] = 22, + [2][1][2][0][RTW89_IC][2][101] = 127, [2][1][2][0][RTW89_KCC][1][101] = 38, [2][1][2][0][RTW89_KCC][0][101] = 127, [2][1][2][0][RTW89_ACMA][1][101] = 127, @@ -45131,6 +46892,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][101] = 127, [2][1][2][0][RTW89_UK][1][101] = 127, [2][1][2][0][RTW89_UK][0][101] = 127, + [2][1][2][0][RTW89_THAILAND][1][101] = 127, + [2][1][2][0][RTW89_THAILAND][0][101] = 127, [2][1][2][0][RTW89_FCC][1][108] = 127, [2][1][2][0][RTW89_FCC][2][108] = 127, [2][1][2][0][RTW89_ETSI][1][108] = 127, @@ -45138,6 +46901,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][108] = 127, [2][1][2][0][RTW89_MKK][0][108] = 127, [2][1][2][0][RTW89_IC][1][108] = 127, + [2][1][2][0][RTW89_IC][2][108] = 127, [2][1][2][0][RTW89_KCC][1][108] = 127, [2][1][2][0][RTW89_KCC][0][108] = 127, [2][1][2][0][RTW89_ACMA][1][108] = 127, @@ -45147,6 +46911,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][108] = 127, [2][1][2][0][RTW89_UK][1][108] = 127, [2][1][2][0][RTW89_UK][0][108] = 127, + [2][1][2][0][RTW89_THAILAND][1][108] = 127, + [2][1][2][0][RTW89_THAILAND][0][108] = 127, [2][1][2][0][RTW89_FCC][1][116] = 127, [2][1][2][0][RTW89_FCC][2][116] = 127, [2][1][2][0][RTW89_ETSI][1][116] = 127, @@ -45154,6 +46920,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][116] = 127, [2][1][2][0][RTW89_MKK][0][116] = 127, [2][1][2][0][RTW89_IC][1][116] = 127, + 
[2][1][2][0][RTW89_IC][2][116] = 127, [2][1][2][0][RTW89_KCC][1][116] = 127, [2][1][2][0][RTW89_KCC][0][116] = 127, [2][1][2][0][RTW89_ACMA][1][116] = 127, @@ -45163,6 +46930,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][116] = 127, [2][1][2][0][RTW89_UK][1][116] = 127, [2][1][2][0][RTW89_UK][0][116] = 127, + [2][1][2][0][RTW89_THAILAND][1][116] = 127, + [2][1][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][1][RTW89_FCC][1][3] = 22, [2][1][2][1][RTW89_FCC][2][3] = 50, [2][1][2][1][RTW89_ETSI][1][3] = 42, @@ -45170,6 +46939,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][3] = 52, [2][1][2][1][RTW89_MKK][0][3] = 14, [2][1][2][1][RTW89_IC][1][3] = 22, + [2][1][2][1][RTW89_IC][2][3] = 50, [2][1][2][1][RTW89_KCC][1][3] = 38, [2][1][2][1][RTW89_KCC][0][3] = 12, [2][1][2][1][RTW89_ACMA][1][3] = 42, @@ -45179,6 +46949,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][3] = 6, [2][1][2][1][RTW89_UK][1][3] = 42, [2][1][2][1][RTW89_UK][0][3] = 6, + [2][1][2][1][RTW89_THAILAND][1][3] = 46, + [2][1][2][1][RTW89_THAILAND][0][3] = 6, [2][1][2][1][RTW89_FCC][1][11] = 20, [2][1][2][1][RTW89_FCC][2][11] = 50, [2][1][2][1][RTW89_ETSI][1][11] = 42, @@ -45186,6 +46958,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][11] = 52, [2][1][2][1][RTW89_MKK][0][11] = 12, [2][1][2][1][RTW89_IC][1][11] = 20, + [2][1][2][1][RTW89_IC][2][11] = 50, [2][1][2][1][RTW89_KCC][1][11] = 38, [2][1][2][1][RTW89_KCC][0][11] = 12, [2][1][2][1][RTW89_ACMA][1][11] = 42, @@ -45195,6 +46968,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][11] = 6, [2][1][2][1][RTW89_UK][1][11] = 42, [2][1][2][1][RTW89_UK][0][11] = 6, + [2][1][2][1][RTW89_THAILAND][1][11] = 46, + [2][1][2][1][RTW89_THAILAND][0][11] = 6, [2][1][2][1][RTW89_FCC][1][18] = 20, [2][1][2][1][RTW89_FCC][2][18] = 50, [2][1][2][1][RTW89_ETSI][1][18] = 42, @@ -45202,6 +46977,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][18] = 52, [2][1][2][1][RTW89_MKK][0][18] = 12, [2][1][2][1][RTW89_IC][1][18] = 20, + [2][1][2][1][RTW89_IC][2][18] = 50, [2][1][2][1][RTW89_KCC][1][18] = 38, [2][1][2][1][RTW89_KCC][0][18] = 12, [2][1][2][1][RTW89_ACMA][1][18] = 42, @@ -45211,6 +46987,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][18] = 6, [2][1][2][1][RTW89_UK][1][18] = 42, [2][1][2][1][RTW89_UK][0][18] = 6, + [2][1][2][1][RTW89_THAILAND][1][18] = 46, + [2][1][2][1][RTW89_THAILAND][0][18] = 6, [2][1][2][1][RTW89_FCC][1][26] = 20, [2][1][2][1][RTW89_FCC][2][26] = 60, [2][1][2][1][RTW89_ETSI][1][26] = 42, @@ -45218,6 +46996,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][26] = 52, [2][1][2][1][RTW89_MKK][0][26] = 12, [2][1][2][1][RTW89_IC][1][26] = 20, + [2][1][2][1][RTW89_IC][2][26] = 60, [2][1][2][1][RTW89_KCC][1][26] = 38, [2][1][2][1][RTW89_KCC][0][26] = 12, [2][1][2][1][RTW89_ACMA][1][26] = 42, @@ -45227,6 +47006,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][26] = 6, [2][1][2][1][RTW89_UK][1][26] = 42, [2][1][2][1][RTW89_UK][0][26] = 6, + [2][1][2][1][RTW89_THAILAND][1][26] = 46, + [2][1][2][1][RTW89_THAILAND][0][26] = 6, [2][1][2][1][RTW89_FCC][1][33] = 20, [2][1][2][1][RTW89_FCC][2][33] = 60, [2][1][2][1][RTW89_ETSI][1][33] = 42, @@ 
-45234,6 +47015,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][33] = 48, [2][1][2][1][RTW89_MKK][0][33] = 12, [2][1][2][1][RTW89_IC][1][33] = 20, + [2][1][2][1][RTW89_IC][2][33] = 60, [2][1][2][1][RTW89_KCC][1][33] = 38, [2][1][2][1][RTW89_KCC][0][33] = 12, [2][1][2][1][RTW89_ACMA][1][33] = 42, @@ -45243,6 +47025,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][33] = 6, [2][1][2][1][RTW89_UK][1][33] = 42, [2][1][2][1][RTW89_UK][0][33] = 6, + [2][1][2][1][RTW89_THAILAND][1][33] = 46, + [2][1][2][1][RTW89_THAILAND][0][33] = 6, [2][1][2][1][RTW89_FCC][1][41] = 22, [2][1][2][1][RTW89_FCC][2][41] = 60, [2][1][2][1][RTW89_ETSI][1][41] = 42, @@ -45250,6 +47034,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][41] = 48, [2][1][2][1][RTW89_MKK][0][41] = 12, [2][1][2][1][RTW89_IC][1][41] = 22, + [2][1][2][1][RTW89_IC][2][41] = 60, [2][1][2][1][RTW89_KCC][1][41] = 38, [2][1][2][1][RTW89_KCC][0][41] = 12, [2][1][2][1][RTW89_ACMA][1][41] = 42, @@ -45259,6 +47044,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][41] = 6, [2][1][2][1][RTW89_UK][1][41] = 42, [2][1][2][1][RTW89_UK][0][41] = 6, + [2][1][2][1][RTW89_THAILAND][1][41] = 46, + [2][1][2][1][RTW89_THAILAND][0][41] = 6, [2][1][2][1][RTW89_FCC][1][48] = 22, [2][1][2][1][RTW89_FCC][2][48] = 127, [2][1][2][1][RTW89_ETSI][1][48] = 127, @@ -45266,6 +47053,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][48] = 127, [2][1][2][1][RTW89_MKK][0][48] = 127, [2][1][2][1][RTW89_IC][1][48] = 22, + [2][1][2][1][RTW89_IC][2][48] = 60, [2][1][2][1][RTW89_KCC][1][48] = 38, [2][1][2][1][RTW89_KCC][0][48] = 127, [2][1][2][1][RTW89_ACMA][1][48] = 127, @@ -45275,6 +47063,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][48] = 127, [2][1][2][1][RTW89_UK][1][48] = 127, [2][1][2][1][RTW89_UK][0][48] = 127, + [2][1][2][1][RTW89_THAILAND][1][48] = 127, + [2][1][2][1][RTW89_THAILAND][0][48] = 127, [2][1][2][1][RTW89_FCC][1][56] = 20, [2][1][2][1][RTW89_FCC][2][56] = 127, [2][1][2][1][RTW89_ETSI][1][56] = 127, @@ -45282,6 +47072,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][56] = 127, [2][1][2][1][RTW89_MKK][0][56] = 127, [2][1][2][1][RTW89_IC][1][56] = 20, + [2][1][2][1][RTW89_IC][2][56] = 56, [2][1][2][1][RTW89_KCC][1][56] = 38, [2][1][2][1][RTW89_KCC][0][56] = 127, [2][1][2][1][RTW89_ACMA][1][56] = 127, @@ -45291,6 +47082,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][56] = 127, [2][1][2][1][RTW89_UK][1][56] = 127, [2][1][2][1][RTW89_UK][0][56] = 127, + [2][1][2][1][RTW89_THAILAND][1][56] = 127, + [2][1][2][1][RTW89_THAILAND][0][56] = 127, [2][1][2][1][RTW89_FCC][1][63] = 22, [2][1][2][1][RTW89_FCC][2][63] = 58, [2][1][2][1][RTW89_ETSI][1][63] = 127, @@ -45298,6 +47091,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][63] = 127, [2][1][2][1][RTW89_MKK][0][63] = 127, [2][1][2][1][RTW89_IC][1][63] = 22, + [2][1][2][1][RTW89_IC][2][63] = 58, [2][1][2][1][RTW89_KCC][1][63] = 38, [2][1][2][1][RTW89_KCC][0][63] = 127, [2][1][2][1][RTW89_ACMA][1][63] = 127, @@ -45307,6 +47101,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][63] = 127, [2][1][2][1][RTW89_UK][1][63] = 127, 
[2][1][2][1][RTW89_UK][0][63] = 127, + [2][1][2][1][RTW89_THAILAND][1][63] = 127, + [2][1][2][1][RTW89_THAILAND][0][63] = 127, [2][1][2][1][RTW89_FCC][1][71] = 20, [2][1][2][1][RTW89_FCC][2][71] = 58, [2][1][2][1][RTW89_ETSI][1][71] = 127, @@ -45314,6 +47110,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][71] = 127, [2][1][2][1][RTW89_MKK][0][71] = 127, [2][1][2][1][RTW89_IC][1][71] = 20, + [2][1][2][1][RTW89_IC][2][71] = 58, [2][1][2][1][RTW89_KCC][1][71] = 38, [2][1][2][1][RTW89_KCC][0][71] = 127, [2][1][2][1][RTW89_ACMA][1][71] = 127, @@ -45323,6 +47120,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][71] = 127, [2][1][2][1][RTW89_UK][1][71] = 127, [2][1][2][1][RTW89_UK][0][71] = 127, + [2][1][2][1][RTW89_THAILAND][1][71] = 127, + [2][1][2][1][RTW89_THAILAND][0][71] = 127, [2][1][2][1][RTW89_FCC][1][78] = 20, [2][1][2][1][RTW89_FCC][2][78] = 58, [2][1][2][1][RTW89_ETSI][1][78] = 127, @@ -45330,6 +47129,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][78] = 127, [2][1][2][1][RTW89_MKK][0][78] = 127, [2][1][2][1][RTW89_IC][1][78] = 20, + [2][1][2][1][RTW89_IC][2][78] = 58, [2][1][2][1][RTW89_KCC][1][78] = 38, [2][1][2][1][RTW89_KCC][0][78] = 127, [2][1][2][1][RTW89_ACMA][1][78] = 127, @@ -45339,6 +47139,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][78] = 127, [2][1][2][1][RTW89_UK][1][78] = 127, [2][1][2][1][RTW89_UK][0][78] = 127, + [2][1][2][1][RTW89_THAILAND][1][78] = 127, + [2][1][2][1][RTW89_THAILAND][0][78] = 127, [2][1][2][1][RTW89_FCC][1][86] = 20, [2][1][2][1][RTW89_FCC][2][86] = 127, [2][1][2][1][RTW89_ETSI][1][86] = 127, @@ -45346,6 +47148,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][86] = 127, [2][1][2][1][RTW89_MKK][0][86] = 127, [2][1][2][1][RTW89_IC][1][86] = 20, + [2][1][2][1][RTW89_IC][2][86] = 127, [2][1][2][1][RTW89_KCC][1][86] = 38, [2][1][2][1][RTW89_KCC][0][86] = 127, [2][1][2][1][RTW89_ACMA][1][86] = 127, @@ -45355,6 +47158,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][86] = 127, [2][1][2][1][RTW89_UK][1][86] = 127, [2][1][2][1][RTW89_UK][0][86] = 127, + [2][1][2][1][RTW89_THAILAND][1][86] = 127, + [2][1][2][1][RTW89_THAILAND][0][86] = 127, [2][1][2][1][RTW89_FCC][1][93] = 22, [2][1][2][1][RTW89_FCC][2][93] = 127, [2][1][2][1][RTW89_ETSI][1][93] = 127, @@ -45362,6 +47167,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][93] = 127, [2][1][2][1][RTW89_MKK][0][93] = 127, [2][1][2][1][RTW89_IC][1][93] = 22, + [2][1][2][1][RTW89_IC][2][93] = 127, [2][1][2][1][RTW89_KCC][1][93] = 38, [2][1][2][1][RTW89_KCC][0][93] = 127, [2][1][2][1][RTW89_ACMA][1][93] = 127, @@ -45371,6 +47177,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][93] = 127, [2][1][2][1][RTW89_UK][1][93] = 127, [2][1][2][1][RTW89_UK][0][93] = 127, + [2][1][2][1][RTW89_THAILAND][1][93] = 127, + [2][1][2][1][RTW89_THAILAND][0][93] = 127, [2][1][2][1][RTW89_FCC][1][101] = 22, [2][1][2][1][RTW89_FCC][2][101] = 127, [2][1][2][1][RTW89_ETSI][1][101] = 127, @@ -45378,6 +47186,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][101] = 127, [2][1][2][1][RTW89_MKK][0][101] = 127, [2][1][2][1][RTW89_IC][1][101] = 22, + [2][1][2][1][RTW89_IC][2][101] = 127, 
[2][1][2][1][RTW89_KCC][1][101] = 38, [2][1][2][1][RTW89_KCC][0][101] = 127, [2][1][2][1][RTW89_ACMA][1][101] = 127, @@ -45387,6 +47196,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][101] = 127, [2][1][2][1][RTW89_UK][1][101] = 127, [2][1][2][1][RTW89_UK][0][101] = 127, + [2][1][2][1][RTW89_THAILAND][1][101] = 127, + [2][1][2][1][RTW89_THAILAND][0][101] = 127, [2][1][2][1][RTW89_FCC][1][108] = 127, [2][1][2][1][RTW89_FCC][2][108] = 127, [2][1][2][1][RTW89_ETSI][1][108] = 127, @@ -45394,6 +47205,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][108] = 127, [2][1][2][1][RTW89_MKK][0][108] = 127, [2][1][2][1][RTW89_IC][1][108] = 127, + [2][1][2][1][RTW89_IC][2][108] = 127, [2][1][2][1][RTW89_KCC][1][108] = 127, [2][1][2][1][RTW89_KCC][0][108] = 127, [2][1][2][1][RTW89_ACMA][1][108] = 127, @@ -45403,6 +47215,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][108] = 127, [2][1][2][1][RTW89_UK][1][108] = 127, [2][1][2][1][RTW89_UK][0][108] = 127, + [2][1][2][1][RTW89_THAILAND][1][108] = 127, + [2][1][2][1][RTW89_THAILAND][0][108] = 127, [2][1][2][1][RTW89_FCC][1][116] = 127, [2][1][2][1][RTW89_FCC][2][116] = 127, [2][1][2][1][RTW89_ETSI][1][116] = 127, @@ -45410,6 +47224,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][116] = 127, [2][1][2][1][RTW89_MKK][0][116] = 127, [2][1][2][1][RTW89_IC][1][116] = 127, + [2][1][2][1][RTW89_IC][2][116] = 127, [2][1][2][1][RTW89_KCC][1][116] = 127, [2][1][2][1][RTW89_KCC][0][116] = 127, [2][1][2][1][RTW89_ACMA][1][116] = 127, @@ -45419,6 +47234,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][116] = 127, [2][1][2][1][RTW89_UK][1][116] = 127, [2][1][2][1][RTW89_UK][0][116] = 127, + [2][1][2][1][RTW89_THAILAND][1][116] = 127, + [2][1][2][1][RTW89_THAILAND][0][116] = 127, [3][0][2][0][RTW89_FCC][1][7] = 52, [3][0][2][0][RTW89_FCC][2][7] = 52, [3][0][2][0][RTW89_ETSI][1][7] = 50, @@ -45426,6 +47243,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][7] = 50, [3][0][2][0][RTW89_MKK][0][7] = 22, [3][0][2][0][RTW89_IC][1][7] = 52, + [3][0][2][0][RTW89_IC][2][7] = 52, [3][0][2][0][RTW89_KCC][1][7] = 42, [3][0][2][0][RTW89_KCC][0][7] = 24, [3][0][2][0][RTW89_ACMA][1][7] = 50, @@ -45435,6 +47253,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][7] = 30, [3][0][2][0][RTW89_UK][1][7] = 50, [3][0][2][0][RTW89_UK][0][7] = 30, + [3][0][2][0][RTW89_THAILAND][1][7] = 50, + [3][0][2][0][RTW89_THAILAND][0][7] = 30, [3][0][2][0][RTW89_FCC][1][22] = 52, [3][0][2][0][RTW89_FCC][2][22] = 52, [3][0][2][0][RTW89_ETSI][1][22] = 50, @@ -45442,6 +47262,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][22] = 50, [3][0][2][0][RTW89_MKK][0][22] = 20, [3][0][2][0][RTW89_IC][1][22] = 52, + [3][0][2][0][RTW89_IC][2][22] = 52, [3][0][2][0][RTW89_KCC][1][22] = 42, [3][0][2][0][RTW89_KCC][0][22] = 24, [3][0][2][0][RTW89_ACMA][1][22] = 50, @@ -45451,6 +47272,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][22] = 30, [3][0][2][0][RTW89_UK][1][22] = 50, [3][0][2][0][RTW89_UK][0][22] = 30, + [3][0][2][0][RTW89_THAILAND][1][22] = 50, + [3][0][2][0][RTW89_THAILAND][0][22] = 30, [3][0][2][0][RTW89_FCC][1][37] = 52, [3][0][2][0][RTW89_FCC][2][37] = 52, 
[3][0][2][0][RTW89_ETSI][1][37] = 50, @@ -45458,6 +47281,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][37] = 50, [3][0][2][0][RTW89_MKK][0][37] = 20, [3][0][2][0][RTW89_IC][1][37] = 52, + [3][0][2][0][RTW89_IC][2][37] = 52, [3][0][2][0][RTW89_KCC][1][37] = 42, [3][0][2][0][RTW89_KCC][0][37] = 24, [3][0][2][0][RTW89_ACMA][1][37] = 50, @@ -45467,6 +47291,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][37] = 30, [3][0][2][0][RTW89_UK][1][37] = 50, [3][0][2][0][RTW89_UK][0][37] = 30, + [3][0][2][0][RTW89_THAILAND][1][37] = 50, + [3][0][2][0][RTW89_THAILAND][0][37] = 30, [3][0][2][0][RTW89_FCC][1][52] = 54, [3][0][2][0][RTW89_FCC][2][52] = 127, [3][0][2][0][RTW89_ETSI][1][52] = 127, @@ -45474,6 +47300,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][52] = 127, [3][0][2][0][RTW89_MKK][0][52] = 127, [3][0][2][0][RTW89_IC][1][52] = 54, + [3][0][2][0][RTW89_IC][2][52] = 56, [3][0][2][0][RTW89_KCC][1][52] = 56, [3][0][2][0][RTW89_KCC][0][52] = 127, [3][0][2][0][RTW89_ACMA][1][52] = 127, @@ -45483,6 +47310,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][52] = 127, [3][0][2][0][RTW89_UK][1][52] = 127, [3][0][2][0][RTW89_UK][0][52] = 127, + [3][0][2][0][RTW89_THAILAND][1][52] = 127, + [3][0][2][0][RTW89_THAILAND][0][52] = 127, [3][0][2][0][RTW89_FCC][1][67] = 54, [3][0][2][0][RTW89_FCC][2][67] = 54, [3][0][2][0][RTW89_ETSI][1][67] = 127, @@ -45490,6 +47319,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][67] = 127, [3][0][2][0][RTW89_MKK][0][67] = 127, [3][0][2][0][RTW89_IC][1][67] = 54, + [3][0][2][0][RTW89_IC][2][67] = 54, [3][0][2][0][RTW89_KCC][1][67] = 54, [3][0][2][0][RTW89_KCC][0][67] = 127, [3][0][2][0][RTW89_ACMA][1][67] = 127, @@ -45499,6 +47329,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][67] = 127, [3][0][2][0][RTW89_UK][1][67] = 127, [3][0][2][0][RTW89_UK][0][67] = 127, + [3][0][2][0][RTW89_THAILAND][1][67] = 127, + [3][0][2][0][RTW89_THAILAND][0][67] = 127, [3][0][2][0][RTW89_FCC][1][82] = 46, [3][0][2][0][RTW89_FCC][2][82] = 127, [3][0][2][0][RTW89_ETSI][1][82] = 127, @@ -45506,6 +47338,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][82] = 127, [3][0][2][0][RTW89_MKK][0][82] = 127, [3][0][2][0][RTW89_IC][1][82] = 46, + [3][0][2][0][RTW89_IC][2][82] = 127, [3][0][2][0][RTW89_KCC][1][82] = 26, [3][0][2][0][RTW89_KCC][0][82] = 127, [3][0][2][0][RTW89_ACMA][1][82] = 127, @@ -45515,6 +47348,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][82] = 127, [3][0][2][0][RTW89_UK][1][82] = 127, [3][0][2][0][RTW89_UK][0][82] = 127, + [3][0][2][0][RTW89_THAILAND][1][82] = 127, + [3][0][2][0][RTW89_THAILAND][0][82] = 127, [3][0][2][0][RTW89_FCC][1][97] = 40, [3][0][2][0][RTW89_FCC][2][97] = 127, [3][0][2][0][RTW89_ETSI][1][97] = 127, @@ -45522,6 +47357,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][97] = 127, [3][0][2][0][RTW89_MKK][0][97] = 127, [3][0][2][0][RTW89_IC][1][97] = 40, + [3][0][2][0][RTW89_IC][2][97] = 127, [3][0][2][0][RTW89_KCC][1][97] = 26, [3][0][2][0][RTW89_KCC][0][97] = 127, [3][0][2][0][RTW89_ACMA][1][97] = 127, @@ -45531,6 +47367,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][97] 
= 127, [3][0][2][0][RTW89_UK][1][97] = 127, [3][0][2][0][RTW89_UK][0][97] = 127, + [3][0][2][0][RTW89_THAILAND][1][97] = 127, + [3][0][2][0][RTW89_THAILAND][0][97] = 127, [3][0][2][0][RTW89_FCC][1][112] = 127, [3][0][2][0][RTW89_FCC][2][112] = 127, [3][0][2][0][RTW89_ETSI][1][112] = 127, @@ -45538,6 +47376,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][112] = 127, [3][0][2][0][RTW89_MKK][0][112] = 127, [3][0][2][0][RTW89_IC][1][112] = 127, + [3][0][2][0][RTW89_IC][2][112] = 127, [3][0][2][0][RTW89_KCC][1][112] = 127, [3][0][2][0][RTW89_KCC][0][112] = 127, [3][0][2][0][RTW89_ACMA][1][112] = 127, @@ -45547,6 +47386,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][112] = 127, [3][0][2][0][RTW89_UK][1][112] = 127, [3][0][2][0][RTW89_UK][0][112] = 127, + [3][0][2][0][RTW89_THAILAND][1][112] = 127, + [3][0][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][0][RTW89_FCC][1][7] = 32, [3][1][2][0][RTW89_FCC][2][7] = 46, [3][1][2][0][RTW89_ETSI][1][7] = 50, @@ -45554,6 +47395,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][7] = 38, [3][1][2][0][RTW89_MKK][0][7] = 10, [3][1][2][0][RTW89_IC][1][7] = 32, + [3][1][2][0][RTW89_IC][2][7] = 46, [3][1][2][0][RTW89_KCC][1][7] = 40, [3][1][2][0][RTW89_KCC][0][7] = 12, [3][1][2][0][RTW89_ACMA][1][7] = 50, @@ -45563,6 +47405,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][7] = 18, [3][1][2][0][RTW89_UK][1][7] = 50, [3][1][2][0][RTW89_UK][0][7] = 18, + [3][1][2][0][RTW89_THAILAND][1][7] = 46, + [3][1][2][0][RTW89_THAILAND][0][7] = 18, [3][1][2][0][RTW89_FCC][1][22] = 30, [3][1][2][0][RTW89_FCC][2][22] = 52, [3][1][2][0][RTW89_ETSI][1][22] = 46, @@ -45570,6 +47414,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][22] = 48, [3][1][2][0][RTW89_MKK][0][22] = 8, [3][1][2][0][RTW89_IC][1][22] = 30, + [3][1][2][0][RTW89_IC][2][22] = 52, [3][1][2][0][RTW89_KCC][1][22] = 40, [3][1][2][0][RTW89_KCC][0][22] = 12, [3][1][2][0][RTW89_ACMA][1][22] = 46, @@ -45579,6 +47424,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][22] = 16, [3][1][2][0][RTW89_UK][1][22] = 46, [3][1][2][0][RTW89_UK][0][22] = 16, + [3][1][2][0][RTW89_THAILAND][1][22] = 46, + [3][1][2][0][RTW89_THAILAND][0][22] = 18, [3][1][2][0][RTW89_FCC][1][37] = 30, [3][1][2][0][RTW89_FCC][2][37] = 52, [3][1][2][0][RTW89_ETSI][1][37] = 46, @@ -45586,6 +47433,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][37] = 48, [3][1][2][0][RTW89_MKK][0][37] = 8, [3][1][2][0][RTW89_IC][1][37] = 30, + [3][1][2][0][RTW89_IC][2][37] = 52, [3][1][2][0][RTW89_KCC][1][37] = 40, [3][1][2][0][RTW89_KCC][0][37] = 12, [3][1][2][0][RTW89_ACMA][1][37] = 46, @@ -45595,6 +47443,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][37] = 16, [3][1][2][0][RTW89_UK][1][37] = 46, [3][1][2][0][RTW89_UK][0][37] = 16, + [3][1][2][0][RTW89_THAILAND][1][37] = 46, + [3][1][2][0][RTW89_THAILAND][0][37] = 18, [3][1][2][0][RTW89_FCC][1][52] = 30, [3][1][2][0][RTW89_FCC][2][52] = 127, [3][1][2][0][RTW89_ETSI][1][52] = 127, @@ -45602,6 +47452,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][52] = 127, [3][1][2][0][RTW89_MKK][0][52] = 127, [3][1][2][0][RTW89_IC][1][52] = 30, + [3][1][2][0][RTW89_IC][2][52] = 56, 
[3][1][2][0][RTW89_KCC][1][52] = 48, [3][1][2][0][RTW89_KCC][0][52] = 127, [3][1][2][0][RTW89_ACMA][1][52] = 127, @@ -45611,6 +47462,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][52] = 127, [3][1][2][0][RTW89_UK][1][52] = 127, [3][1][2][0][RTW89_UK][0][52] = 127, + [3][1][2][0][RTW89_THAILAND][1][52] = 127, + [3][1][2][0][RTW89_THAILAND][0][52] = 127, [3][1][2][0][RTW89_FCC][1][67] = 32, [3][1][2][0][RTW89_FCC][2][67] = 54, [3][1][2][0][RTW89_ETSI][1][67] = 127, @@ -45618,6 +47471,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][67] = 127, [3][1][2][0][RTW89_MKK][0][67] = 127, [3][1][2][0][RTW89_IC][1][67] = 32, + [3][1][2][0][RTW89_IC][2][67] = 54, [3][1][2][0][RTW89_KCC][1][67] = 48, [3][1][2][0][RTW89_KCC][0][67] = 127, [3][1][2][0][RTW89_ACMA][1][67] = 127, @@ -45627,6 +47481,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][67] = 127, [3][1][2][0][RTW89_UK][1][67] = 127, [3][1][2][0][RTW89_UK][0][67] = 127, + [3][1][2][0][RTW89_THAILAND][1][67] = 127, + [3][1][2][0][RTW89_THAILAND][0][67] = 127, [3][1][2][0][RTW89_FCC][1][82] = 32, [3][1][2][0][RTW89_FCC][2][82] = 127, [3][1][2][0][RTW89_ETSI][1][82] = 127, @@ -45634,6 +47490,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][82] = 127, [3][1][2][0][RTW89_MKK][0][82] = 127, [3][1][2][0][RTW89_IC][1][82] = 32, + [3][1][2][0][RTW89_IC][2][82] = 127, [3][1][2][0][RTW89_KCC][1][82] = 24, [3][1][2][0][RTW89_KCC][0][82] = 127, [3][1][2][0][RTW89_ACMA][1][82] = 127, @@ -45643,6 +47500,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][82] = 127, [3][1][2][0][RTW89_UK][1][82] = 127, [3][1][2][0][RTW89_UK][0][82] = 127, + [3][1][2][0][RTW89_THAILAND][1][82] = 127, + [3][1][2][0][RTW89_THAILAND][0][82] = 127, [3][1][2][0][RTW89_FCC][1][97] = 32, [3][1][2][0][RTW89_FCC][2][97] = 127, [3][1][2][0][RTW89_ETSI][1][97] = 127, @@ -45650,6 +47509,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][97] = 127, [3][1][2][0][RTW89_MKK][0][97] = 127, [3][1][2][0][RTW89_IC][1][97] = 32, + [3][1][2][0][RTW89_IC][2][97] = 127, [3][1][2][0][RTW89_KCC][1][97] = 24, [3][1][2][0][RTW89_KCC][0][97] = 127, [3][1][2][0][RTW89_ACMA][1][97] = 127, @@ -45659,6 +47519,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][97] = 127, [3][1][2][0][RTW89_UK][1][97] = 127, [3][1][2][0][RTW89_UK][0][97] = 127, + [3][1][2][0][RTW89_THAILAND][1][97] = 127, + [3][1][2][0][RTW89_THAILAND][0][97] = 127, [3][1][2][0][RTW89_FCC][1][112] = 127, [3][1][2][0][RTW89_FCC][2][112] = 127, [3][1][2][0][RTW89_ETSI][1][112] = 127, @@ -45666,6 +47528,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][112] = 127, [3][1][2][0][RTW89_MKK][0][112] = 127, [3][1][2][0][RTW89_IC][1][112] = 127, + [3][1][2][0][RTW89_IC][2][112] = 127, [3][1][2][0][RTW89_KCC][1][112] = 127, [3][1][2][0][RTW89_KCC][0][112] = 127, [3][1][2][0][RTW89_ACMA][1][112] = 127, @@ -45675,6 +47538,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][112] = 127, [3][1][2][0][RTW89_UK][1][112] = 127, [3][1][2][0][RTW89_UK][0][112] = 127, + [3][1][2][0][RTW89_THAILAND][1][112] = 127, + [3][1][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][1][RTW89_FCC][1][7] = 32, [3][1][2][1][RTW89_FCC][2][7] = 46, 
[3][1][2][1][RTW89_ETSI][1][7] = 42, @@ -45682,6 +47547,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][7] = 38, [3][1][2][1][RTW89_MKK][0][7] = 10, [3][1][2][1][RTW89_IC][1][7] = 32, + [3][1][2][1][RTW89_IC][2][7] = 46, [3][1][2][1][RTW89_KCC][1][7] = 40, [3][1][2][1][RTW89_KCC][0][7] = 12, [3][1][2][1][RTW89_ACMA][1][7] = 42, @@ -45691,6 +47557,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][7] = 6, [3][1][2][1][RTW89_UK][1][7] = 42, [3][1][2][1][RTW89_UK][0][7] = 6, + [3][1][2][1][RTW89_THAILAND][1][7] = 46, + [3][1][2][1][RTW89_THAILAND][0][7] = 6, [3][1][2][1][RTW89_FCC][1][22] = 30, [3][1][2][1][RTW89_FCC][2][22] = 52, [3][1][2][1][RTW89_ETSI][1][22] = 42, @@ -45698,6 +47566,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][22] = 48, [3][1][2][1][RTW89_MKK][0][22] = 8, [3][1][2][1][RTW89_IC][1][22] = 30, + [3][1][2][1][RTW89_IC][2][22] = 52, [3][1][2][1][RTW89_KCC][1][22] = 40, [3][1][2][1][RTW89_KCC][0][22] = 12, [3][1][2][1][RTW89_ACMA][1][22] = 42, @@ -45707,6 +47576,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][22] = 6, [3][1][2][1][RTW89_UK][1][22] = 42, [3][1][2][1][RTW89_UK][0][22] = 6, + [3][1][2][1][RTW89_THAILAND][1][22] = 46, + [3][1][2][1][RTW89_THAILAND][0][22] = 6, [3][1][2][1][RTW89_FCC][1][37] = 30, [3][1][2][1][RTW89_FCC][2][37] = 52, [3][1][2][1][RTW89_ETSI][1][37] = 42, @@ -45714,6 +47585,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][37] = 48, [3][1][2][1][RTW89_MKK][0][37] = 8, [3][1][2][1][RTW89_IC][1][37] = 30, + [3][1][2][1][RTW89_IC][2][37] = 52, [3][1][2][1][RTW89_KCC][1][37] = 40, [3][1][2][1][RTW89_KCC][0][37] = 12, [3][1][2][1][RTW89_ACMA][1][37] = 42, @@ -45723,6 +47595,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][37] = 6, [3][1][2][1][RTW89_UK][1][37] = 42, [3][1][2][1][RTW89_UK][0][37] = 6, + [3][1][2][1][RTW89_THAILAND][1][37] = 46, + [3][1][2][1][RTW89_THAILAND][0][37] = 6, [3][1][2][1][RTW89_FCC][1][52] = 30, [3][1][2][1][RTW89_FCC][2][52] = 127, [3][1][2][1][RTW89_ETSI][1][52] = 127, @@ -45730,6 +47604,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][52] = 127, [3][1][2][1][RTW89_MKK][0][52] = 127, [3][1][2][1][RTW89_IC][1][52] = 30, + [3][1][2][1][RTW89_IC][2][52] = 56, [3][1][2][1][RTW89_KCC][1][52] = 48, [3][1][2][1][RTW89_KCC][0][52] = 127, [3][1][2][1][RTW89_ACMA][1][52] = 127, @@ -45739,6 +47614,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][52] = 127, [3][1][2][1][RTW89_UK][1][52] = 127, [3][1][2][1][RTW89_UK][0][52] = 127, + [3][1][2][1][RTW89_THAILAND][1][52] = 127, + [3][1][2][1][RTW89_THAILAND][0][52] = 127, [3][1][2][1][RTW89_FCC][1][67] = 32, [3][1][2][1][RTW89_FCC][2][67] = 54, [3][1][2][1][RTW89_ETSI][1][67] = 127, @@ -45746,6 +47623,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][67] = 127, [3][1][2][1][RTW89_MKK][0][67] = 127, [3][1][2][1][RTW89_IC][1][67] = 32, + [3][1][2][1][RTW89_IC][2][67] = 54, [3][1][2][1][RTW89_KCC][1][67] = 48, [3][1][2][1][RTW89_KCC][0][67] = 127, [3][1][2][1][RTW89_ACMA][1][67] = 127, @@ -45755,6 +47633,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][67] = 127, [3][1][2][1][RTW89_UK][1][67] = 127, 
[3][1][2][1][RTW89_UK][0][67] = 127, + [3][1][2][1][RTW89_THAILAND][1][67] = 127, + [3][1][2][1][RTW89_THAILAND][0][67] = 127, [3][1][2][1][RTW89_FCC][1][82] = 32, [3][1][2][1][RTW89_FCC][2][82] = 127, [3][1][2][1][RTW89_ETSI][1][82] = 127, @@ -45762,6 +47642,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][82] = 127, [3][1][2][1][RTW89_MKK][0][82] = 127, [3][1][2][1][RTW89_IC][1][82] = 32, + [3][1][2][1][RTW89_IC][2][82] = 127, [3][1][2][1][RTW89_KCC][1][82] = 24, [3][1][2][1][RTW89_KCC][0][82] = 127, [3][1][2][1][RTW89_ACMA][1][82] = 127, @@ -45771,6 +47652,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][82] = 127, [3][1][2][1][RTW89_UK][1][82] = 127, [3][1][2][1][RTW89_UK][0][82] = 127, + [3][1][2][1][RTW89_THAILAND][1][82] = 127, + [3][1][2][1][RTW89_THAILAND][0][82] = 127, [3][1][2][1][RTW89_FCC][1][97] = 32, [3][1][2][1][RTW89_FCC][2][97] = 127, [3][1][2][1][RTW89_ETSI][1][97] = 127, @@ -45778,6 +47661,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][97] = 127, [3][1][2][1][RTW89_MKK][0][97] = 127, [3][1][2][1][RTW89_IC][1][97] = 32, + [3][1][2][1][RTW89_IC][2][97] = 127, [3][1][2][1][RTW89_KCC][1][97] = 24, [3][1][2][1][RTW89_KCC][0][97] = 127, [3][1][2][1][RTW89_ACMA][1][97] = 127, @@ -45787,6 +47671,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][97] = 127, [3][1][2][1][RTW89_UK][1][97] = 127, [3][1][2][1][RTW89_UK][0][97] = 127, + [3][1][2][1][RTW89_THAILAND][1][97] = 127, + [3][1][2][1][RTW89_THAILAND][0][97] = 127, [3][1][2][1][RTW89_FCC][1][112] = 127, [3][1][2][1][RTW89_FCC][2][112] = 127, [3][1][2][1][RTW89_ETSI][1][112] = 127, @@ -45794,6 +47680,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][112] = 127, [3][1][2][1][RTW89_MKK][0][112] = 127, [3][1][2][1][RTW89_IC][1][112] = 127, + [3][1][2][1][RTW89_IC][2][112] = 127, [3][1][2][1][RTW89_KCC][1][112] = 127, [3][1][2][1][RTW89_KCC][0][112] = 127, [3][1][2][1][RTW89_ACMA][1][112] = 127, @@ -45803,6 +47690,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][112] = 127, [3][1][2][1][RTW89_UK][1][112] = 127, [3][1][2][1][RTW89_UK][0][112] = 127, + [3][1][2][1][RTW89_THAILAND][1][112] = 127, + [3][1][2][1][RTW89_THAILAND][0][112] = 127, }; static @@ -45904,6 +47793,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][0] = 34, [0][0][RTW89_CHILE][0] = 60, [0][0][RTW89_QATAR][0] = 34, + [0][0][RTW89_THAILAND][0] = 34, [0][0][RTW89_FCC][1] = 60, [0][0][RTW89_ETSI][1] = 38, [0][0][RTW89_MKK][1] = 40, @@ -45916,6 +47806,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][1] = 38, [0][0][RTW89_CHILE][1] = 50, [0][0][RTW89_QATAR][1] = 38, + [0][0][RTW89_THAILAND][1] = 38, [0][0][RTW89_FCC][2] = 64, [0][0][RTW89_ETSI][2] = 38, [0][0][RTW89_MKK][2] = 40, @@ -45928,6 +47819,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][2] = 38, [0][0][RTW89_CHILE][2] = 50, [0][0][RTW89_QATAR][2] = 38, + [0][0][RTW89_THAILAND][2] = 38, [0][0][RTW89_FCC][3] = 68, [0][0][RTW89_ETSI][3] = 38, [0][0][RTW89_MKK][3] = 40, @@ -45940,6 +47832,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][3] = 38, [0][0][RTW89_CHILE][3] = 50, [0][0][RTW89_QATAR][3] = 38, + [0][0][RTW89_THAILAND][3] = 38, 
[0][0][RTW89_FCC][4] = 68, [0][0][RTW89_ETSI][4] = 38, [0][0][RTW89_MKK][4] = 40, @@ -45952,6 +47845,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][4] = 38, [0][0][RTW89_CHILE][4] = 50, [0][0][RTW89_QATAR][4] = 38, + [0][0][RTW89_THAILAND][4] = 38, [0][0][RTW89_FCC][5] = 78, [0][0][RTW89_ETSI][5] = 38, [0][0][RTW89_MKK][5] = 40, @@ -45964,6 +47858,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][5] = 38, [0][0][RTW89_CHILE][5] = 78, [0][0][RTW89_QATAR][5] = 38, + [0][0][RTW89_THAILAND][5] = 38, [0][0][RTW89_FCC][6] = 54, [0][0][RTW89_ETSI][6] = 38, [0][0][RTW89_MKK][6] = 40, @@ -45976,6 +47871,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][6] = 38, [0][0][RTW89_CHILE][6] = 36, [0][0][RTW89_QATAR][6] = 38, + [0][0][RTW89_THAILAND][6] = 38, [0][0][RTW89_FCC][7] = 54, [0][0][RTW89_ETSI][7] = 38, [0][0][RTW89_MKK][7] = 40, @@ -45988,6 +47884,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][7] = 38, [0][0][RTW89_CHILE][7] = 36, [0][0][RTW89_QATAR][7] = 38, + [0][0][RTW89_THAILAND][7] = 38, [0][0][RTW89_FCC][8] = 50, [0][0][RTW89_ETSI][8] = 38, [0][0][RTW89_MKK][8] = 40, @@ -46000,6 +47897,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][8] = 38, [0][0][RTW89_CHILE][8] = 36, [0][0][RTW89_QATAR][8] = 38, + [0][0][RTW89_THAILAND][8] = 38, [0][0][RTW89_FCC][9] = 46, [0][0][RTW89_ETSI][9] = 38, [0][0][RTW89_MKK][9] = 40, @@ -46012,6 +47910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][9] = 38, [0][0][RTW89_CHILE][9] = 36, [0][0][RTW89_QATAR][9] = 38, + [0][0][RTW89_THAILAND][9] = 38, [0][0][RTW89_FCC][10] = 46, [0][0][RTW89_ETSI][10] = 38, [0][0][RTW89_MKK][10] = 40, @@ -46024,6 +47923,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][10] = 38, [0][0][RTW89_CHILE][10] = 46, [0][0][RTW89_QATAR][10] = 38, + [0][0][RTW89_THAILAND][10] = 38, [0][0][RTW89_FCC][11] = 26, [0][0][RTW89_ETSI][11] = 38, [0][0][RTW89_MKK][11] = 40, @@ -46036,6 +47936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][11] = 38, [0][0][RTW89_CHILE][11] = 26, [0][0][RTW89_QATAR][11] = 38, + [0][0][RTW89_THAILAND][11] = 38, [0][0][RTW89_FCC][12] = -20, [0][0][RTW89_ETSI][12] = 34, [0][0][RTW89_MKK][12] = 36, @@ -46048,6 +47949,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][12] = 34, [0][0][RTW89_CHILE][12] = -20, [0][0][RTW89_QATAR][12] = 34, + [0][0][RTW89_THAILAND][12] = 34, [0][0][RTW89_FCC][13] = 127, [0][0][RTW89_ETSI][13] = 127, [0][0][RTW89_MKK][13] = 127, @@ -46060,6 +47962,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][13] = 127, [0][0][RTW89_CHILE][13] = 127, [0][0][RTW89_QATAR][13] = 127, + [0][0][RTW89_THAILAND][13] = 127, [0][1][RTW89_FCC][0] = 56, [0][1][RTW89_ETSI][0] = 22, [0][1][RTW89_MKK][0] = 24, @@ -46072,6 +47975,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][0] = 22, [0][1][RTW89_CHILE][0] = 56, [0][1][RTW89_QATAR][0] = 22, + [0][1][RTW89_THAILAND][0] = 22, [0][1][RTW89_FCC][1] = 56, [0][1][RTW89_ETSI][1] = 24, [0][1][RTW89_MKK][1] = 30, @@ -46084,6 +47988,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][1] = 24, [0][1][RTW89_CHILE][1] = 40, [0][1][RTW89_QATAR][1] = 24, + 
[0][1][RTW89_THAILAND][1] = 24, [0][1][RTW89_FCC][2] = 60, [0][1][RTW89_ETSI][2] = 24, [0][1][RTW89_MKK][2] = 30, @@ -46096,6 +48001,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][2] = 24, [0][1][RTW89_CHILE][2] = 40, [0][1][RTW89_QATAR][2] = 24, + [0][1][RTW89_THAILAND][2] = 24, [0][1][RTW89_FCC][3] = 64, [0][1][RTW89_ETSI][3] = 24, [0][1][RTW89_MKK][3] = 30, @@ -46108,6 +48014,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][3] = 24, [0][1][RTW89_CHILE][3] = 40, [0][1][RTW89_QATAR][3] = 24, + [0][1][RTW89_THAILAND][3] = 24, [0][1][RTW89_FCC][4] = 68, [0][1][RTW89_ETSI][4] = 24, [0][1][RTW89_MKK][4] = 30, @@ -46120,6 +48027,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][4] = 24, [0][1][RTW89_CHILE][4] = 40, [0][1][RTW89_QATAR][4] = 24, + [0][1][RTW89_THAILAND][4] = 24, [0][1][RTW89_FCC][5] = 76, [0][1][RTW89_ETSI][5] = 24, [0][1][RTW89_MKK][5] = 30, @@ -46132,6 +48040,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][5] = 24, [0][1][RTW89_CHILE][5] = 76, [0][1][RTW89_QATAR][5] = 24, + [0][1][RTW89_THAILAND][5] = 24, [0][1][RTW89_FCC][6] = 54, [0][1][RTW89_ETSI][6] = 24, [0][1][RTW89_MKK][6] = 30, @@ -46144,6 +48053,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][6] = 24, [0][1][RTW89_CHILE][6] = 26, [0][1][RTW89_QATAR][6] = 24, + [0][1][RTW89_THAILAND][6] = 24, [0][1][RTW89_FCC][7] = 50, [0][1][RTW89_ETSI][7] = 24, [0][1][RTW89_MKK][7] = 30, @@ -46156,6 +48066,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][7] = 24, [0][1][RTW89_CHILE][7] = 26, [0][1][RTW89_QATAR][7] = 24, + [0][1][RTW89_THAILAND][7] = 24, [0][1][RTW89_FCC][8] = 46, [0][1][RTW89_ETSI][8] = 24, [0][1][RTW89_MKK][8] = 30, @@ -46168,6 +48079,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][8] = 24, [0][1][RTW89_CHILE][8] = 26, [0][1][RTW89_QATAR][8] = 24, + [0][1][RTW89_THAILAND][8] = 24, [0][1][RTW89_FCC][9] = 42, [0][1][RTW89_ETSI][9] = 24, [0][1][RTW89_MKK][9] = 30, @@ -46180,6 +48092,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][9] = 24, [0][1][RTW89_CHILE][9] = 26, [0][1][RTW89_QATAR][9] = 24, + [0][1][RTW89_THAILAND][9] = 24, [0][1][RTW89_FCC][10] = 42, [0][1][RTW89_ETSI][10] = 24, [0][1][RTW89_MKK][10] = 30, @@ -46192,6 +48105,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][10] = 24, [0][1][RTW89_CHILE][10] = 42, [0][1][RTW89_QATAR][10] = 24, + [0][1][RTW89_THAILAND][10] = 24, [0][1][RTW89_FCC][11] = 22, [0][1][RTW89_ETSI][11] = 24, [0][1][RTW89_MKK][11] = 30, @@ -46204,6 +48118,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][11] = 24, [0][1][RTW89_CHILE][11] = 22, [0][1][RTW89_QATAR][11] = 24, + [0][1][RTW89_THAILAND][11] = 24, [0][1][RTW89_FCC][12] = -30, [0][1][RTW89_ETSI][12] = 20, [0][1][RTW89_MKK][12] = 24, @@ -46216,6 +48131,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][12] = 20, [0][1][RTW89_CHILE][12] = -30, [0][1][RTW89_QATAR][12] = 20, + [0][1][RTW89_THAILAND][12] = 20, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, [0][1][RTW89_MKK][13] = 127, @@ -46228,6 +48144,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][13] = 127, [0][1][RTW89_CHILE][13] = 127, 
[0][1][RTW89_QATAR][13] = 127, + [0][1][RTW89_THAILAND][13] = 127, [1][0][RTW89_FCC][0] = 66, [1][0][RTW89_ETSI][0] = 46, [1][0][RTW89_MKK][0] = 48, @@ -46240,6 +48157,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][0] = 46, [1][0][RTW89_CHILE][0] = 66, [1][0][RTW89_QATAR][0] = 46, + [1][0][RTW89_THAILAND][0] = 46, [1][0][RTW89_FCC][1] = 66, [1][0][RTW89_ETSI][1] = 46, [1][0][RTW89_MKK][1] = 48, @@ -46252,6 +48170,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][1] = 46, [1][0][RTW89_CHILE][1] = 54, [1][0][RTW89_QATAR][1] = 46, + [1][0][RTW89_THAILAND][1] = 46, [1][0][RTW89_FCC][2] = 70, [1][0][RTW89_ETSI][2] = 46, [1][0][RTW89_MKK][2] = 48, @@ -46264,6 +48183,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][2] = 46, [1][0][RTW89_CHILE][2] = 54, [1][0][RTW89_QATAR][2] = 46, + [1][0][RTW89_THAILAND][2] = 46, [1][0][RTW89_FCC][3] = 72, [1][0][RTW89_ETSI][3] = 46, [1][0][RTW89_MKK][3] = 48, @@ -46276,6 +48196,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][3] = 46, [1][0][RTW89_CHILE][3] = 54, [1][0][RTW89_QATAR][3] = 46, + [1][0][RTW89_THAILAND][3] = 46, [1][0][RTW89_FCC][4] = 72, [1][0][RTW89_ETSI][4] = 46, [1][0][RTW89_MKK][4] = 48, @@ -46288,6 +48209,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][4] = 46, [1][0][RTW89_CHILE][4] = 54, [1][0][RTW89_QATAR][4] = 46, + [1][0][RTW89_THAILAND][4] = 46, [1][0][RTW89_FCC][5] = 82, [1][0][RTW89_ETSI][5] = 46, [1][0][RTW89_MKK][5] = 48, @@ -46300,6 +48222,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][5] = 46, [1][0][RTW89_CHILE][5] = 82, [1][0][RTW89_QATAR][5] = 46, + [1][0][RTW89_THAILAND][5] = 46, [1][0][RTW89_FCC][6] = 58, [1][0][RTW89_ETSI][6] = 44, [1][0][RTW89_MKK][6] = 48, @@ -46312,6 +48235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][6] = 44, [1][0][RTW89_CHILE][6] = 40, [1][0][RTW89_QATAR][6] = 44, + [1][0][RTW89_THAILAND][6] = 44, [1][0][RTW89_FCC][7] = 58, [1][0][RTW89_ETSI][7] = 46, [1][0][RTW89_MKK][7] = 48, @@ -46324,6 +48248,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][7] = 46, [1][0][RTW89_CHILE][7] = 40, [1][0][RTW89_QATAR][7] = 46, + [1][0][RTW89_THAILAND][7] = 46, [1][0][RTW89_FCC][8] = 58, [1][0][RTW89_ETSI][8] = 46, [1][0][RTW89_MKK][8] = 48, @@ -46336,6 +48261,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][8] = 46, [1][0][RTW89_CHILE][8] = 40, [1][0][RTW89_QATAR][8] = 46, + [1][0][RTW89_THAILAND][8] = 46, [1][0][RTW89_FCC][9] = 54, [1][0][RTW89_ETSI][9] = 46, [1][0][RTW89_MKK][9] = 48, @@ -46348,6 +48274,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][9] = 46, [1][0][RTW89_CHILE][9] = 40, [1][0][RTW89_QATAR][9] = 46, + [1][0][RTW89_THAILAND][9] = 46, [1][0][RTW89_FCC][10] = 54, [1][0][RTW89_ETSI][10] = 46, [1][0][RTW89_MKK][10] = 48, @@ -46360,6 +48287,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][10] = 46, [1][0][RTW89_CHILE][10] = 54, [1][0][RTW89_QATAR][10] = 46, + [1][0][RTW89_THAILAND][10] = 46, [1][0][RTW89_FCC][11] = 36, [1][0][RTW89_ETSI][11] = 46, [1][0][RTW89_MKK][11] = 48, @@ -46372,6 +48300,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][11] = 46, [1][0][RTW89_CHILE][11] = 
36, [1][0][RTW89_QATAR][11] = 46, + [1][0][RTW89_THAILAND][11] = 46, [1][0][RTW89_FCC][12] = 4, [1][0][RTW89_ETSI][12] = 46, [1][0][RTW89_MKK][12] = 46, @@ -46384,6 +48313,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][12] = 46, [1][0][RTW89_CHILE][12] = 4, [1][0][RTW89_QATAR][12] = 46, + [1][0][RTW89_THAILAND][12] = 46, [1][0][RTW89_FCC][13] = 127, [1][0][RTW89_ETSI][13] = 127, [1][0][RTW89_MKK][13] = 127, @@ -46396,6 +48326,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][13] = 127, [1][0][RTW89_CHILE][13] = 127, [1][0][RTW89_QATAR][13] = 127, + [1][0][RTW89_THAILAND][13] = 127, [1][1][RTW89_FCC][0] = 58, [1][1][RTW89_ETSI][0] = 32, [1][1][RTW89_MKK][0] = 34, @@ -46408,6 +48339,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][0] = 32, [1][1][RTW89_CHILE][0] = 58, [1][1][RTW89_QATAR][0] = 32, + [1][1][RTW89_THAILAND][0] = 32, [1][1][RTW89_FCC][1] = 58, [1][1][RTW89_ETSI][1] = 34, [1][1][RTW89_MKK][1] = 34, @@ -46420,6 +48352,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][1] = 34, [1][1][RTW89_CHILE][1] = 40, [1][1][RTW89_QATAR][1] = 34, + [1][1][RTW89_THAILAND][1] = 34, [1][1][RTW89_FCC][2] = 62, [1][1][RTW89_ETSI][2] = 34, [1][1][RTW89_MKK][2] = 34, @@ -46432,6 +48365,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][2] = 34, [1][1][RTW89_CHILE][2] = 40, [1][1][RTW89_QATAR][2] = 34, + [1][1][RTW89_THAILAND][2] = 34, [1][1][RTW89_FCC][3] = 66, [1][1][RTW89_ETSI][3] = 34, [1][1][RTW89_MKK][3] = 34, @@ -46444,6 +48378,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][3] = 34, [1][1][RTW89_CHILE][3] = 40, [1][1][RTW89_QATAR][3] = 34, + [1][1][RTW89_THAILAND][3] = 34, [1][1][RTW89_FCC][4] = 70, [1][1][RTW89_ETSI][4] = 34, [1][1][RTW89_MKK][4] = 34, @@ -46456,6 +48391,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][4] = 34, [1][1][RTW89_CHILE][4] = 40, [1][1][RTW89_QATAR][4] = 34, + [1][1][RTW89_THAILAND][4] = 34, [1][1][RTW89_FCC][5] = 82, [1][1][RTW89_ETSI][5] = 34, [1][1][RTW89_MKK][5] = 34, @@ -46468,6 +48404,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][5] = 34, [1][1][RTW89_CHILE][5] = 78, [1][1][RTW89_QATAR][5] = 34, + [1][1][RTW89_THAILAND][5] = 34, [1][1][RTW89_FCC][6] = 60, [1][1][RTW89_ETSI][6] = 34, [1][1][RTW89_MKK][6] = 34, @@ -46480,6 +48417,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][6] = 34, [1][1][RTW89_CHILE][6] = 30, [1][1][RTW89_QATAR][6] = 34, + [1][1][RTW89_THAILAND][6] = 34, [1][1][RTW89_FCC][7] = 56, [1][1][RTW89_ETSI][7] = 34, [1][1][RTW89_MKK][7] = 34, @@ -46492,6 +48430,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][7] = 34, [1][1][RTW89_CHILE][7] = 30, [1][1][RTW89_QATAR][7] = 34, + [1][1][RTW89_THAILAND][7] = 34, [1][1][RTW89_FCC][8] = 52, [1][1][RTW89_ETSI][8] = 34, [1][1][RTW89_MKK][8] = 34, @@ -46504,6 +48443,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][8] = 34, [1][1][RTW89_CHILE][8] = 30, [1][1][RTW89_QATAR][8] = 34, + [1][1][RTW89_THAILAND][8] = 34, [1][1][RTW89_FCC][9] = 48, [1][1][RTW89_ETSI][9] = 34, [1][1][RTW89_MKK][9] = 34, @@ -46516,6 +48456,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][9] = 34, 
[1][1][RTW89_CHILE][9] = 30, [1][1][RTW89_QATAR][9] = 34, + [1][1][RTW89_THAILAND][9] = 34, [1][1][RTW89_FCC][10] = 48, [1][1][RTW89_ETSI][10] = 34, [1][1][RTW89_MKK][10] = 34, @@ -46528,6 +48469,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][10] = 34, [1][1][RTW89_CHILE][10] = 48, [1][1][RTW89_QATAR][10] = 34, + [1][1][RTW89_THAILAND][10] = 34, [1][1][RTW89_FCC][11] = 30, [1][1][RTW89_ETSI][11] = 34, [1][1][RTW89_MKK][11] = 34, @@ -46540,6 +48482,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][11] = 34, [1][1][RTW89_CHILE][11] = 30, [1][1][RTW89_QATAR][11] = 34, + [1][1][RTW89_THAILAND][11] = 34, [1][1][RTW89_FCC][12] = -6, [1][1][RTW89_ETSI][12] = 34, [1][1][RTW89_MKK][12] = 34, @@ -46552,6 +48495,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][12] = 34, [1][1][RTW89_CHILE][12] = -6, [1][1][RTW89_QATAR][12] = 34, + [1][1][RTW89_THAILAND][12] = 34, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, [1][1][RTW89_MKK][13] = 127, @@ -46564,6 +48508,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][13] = 127, [1][1][RTW89_CHILE][13] = 127, [1][1][RTW89_QATAR][13] = 127, + [1][1][RTW89_THAILAND][13] = 127, [2][0][RTW89_FCC][0] = 70, [2][0][RTW89_ETSI][0] = 58, [2][0][RTW89_MKK][0] = 58, @@ -46576,6 +48521,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][0] = 58, [2][0][RTW89_CHILE][0] = 70, [2][0][RTW89_QATAR][0] = 58, + [2][0][RTW89_THAILAND][0] = 58, [2][0][RTW89_FCC][1] = 70, [2][0][RTW89_ETSI][1] = 58, [2][0][RTW89_MKK][1] = 58, @@ -46588,6 +48534,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][1] = 58, [2][0][RTW89_CHILE][1] = 54, [2][0][RTW89_QATAR][1] = 58, + [2][0][RTW89_THAILAND][1] = 58, [2][0][RTW89_FCC][2] = 72, [2][0][RTW89_ETSI][2] = 58, [2][0][RTW89_MKK][2] = 58, @@ -46600,6 +48547,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][2] = 58, [2][0][RTW89_CHILE][2] = 54, [2][0][RTW89_QATAR][2] = 58, + [2][0][RTW89_THAILAND][2] = 58, [2][0][RTW89_FCC][3] = 72, [2][0][RTW89_ETSI][3] = 58, [2][0][RTW89_MKK][3] = 58, @@ -46612,6 +48560,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][3] = 58, [2][0][RTW89_CHILE][3] = 54, [2][0][RTW89_QATAR][3] = 58, + [2][0][RTW89_THAILAND][3] = 58, [2][0][RTW89_FCC][4] = 72, [2][0][RTW89_ETSI][4] = 58, [2][0][RTW89_MKK][4] = 58, @@ -46624,6 +48573,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][4] = 58, [2][0][RTW89_CHILE][4] = 54, [2][0][RTW89_QATAR][4] = 58, + [2][0][RTW89_THAILAND][4] = 58, [2][0][RTW89_FCC][5] = 82, [2][0][RTW89_ETSI][5] = 58, [2][0][RTW89_MKK][5] = 58, @@ -46636,6 +48586,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][5] = 58, [2][0][RTW89_CHILE][5] = 82, [2][0][RTW89_QATAR][5] = 58, + [2][0][RTW89_THAILAND][5] = 58, [2][0][RTW89_FCC][6] = 66, [2][0][RTW89_ETSI][6] = 56, [2][0][RTW89_MKK][6] = 58, @@ -46648,6 +48599,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][6] = 56, [2][0][RTW89_CHILE][6] = 48, [2][0][RTW89_QATAR][6] = 56, + [2][0][RTW89_THAILAND][6] = 56, [2][0][RTW89_FCC][7] = 66, [2][0][RTW89_ETSI][7] = 58, [2][0][RTW89_MKK][7] = 58, @@ -46660,6 +48612,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][0][RTW89_UKRAINE][7] = 58, [2][0][RTW89_CHILE][7] = 48, [2][0][RTW89_QATAR][7] = 58, + [2][0][RTW89_THAILAND][7] = 58, [2][0][RTW89_FCC][8] = 66, [2][0][RTW89_ETSI][8] = 58, [2][0][RTW89_MKK][8] = 58, @@ -46672,6 +48625,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][8] = 58, [2][0][RTW89_CHILE][8] = 48, [2][0][RTW89_QATAR][8] = 58, + [2][0][RTW89_THAILAND][8] = 58, [2][0][RTW89_FCC][9] = 64, [2][0][RTW89_ETSI][9] = 58, [2][0][RTW89_MKK][9] = 58, @@ -46684,6 +48638,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][9] = 58, [2][0][RTW89_CHILE][9] = 48, [2][0][RTW89_QATAR][9] = 58, + [2][0][RTW89_THAILAND][9] = 58, [2][0][RTW89_FCC][10] = 64, [2][0][RTW89_ETSI][10] = 58, [2][0][RTW89_MKK][10] = 58, @@ -46696,6 +48651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][10] = 58, [2][0][RTW89_CHILE][10] = 64, [2][0][RTW89_QATAR][10] = 58, + [2][0][RTW89_THAILAND][10] = 58, [2][0][RTW89_FCC][11] = 48, [2][0][RTW89_ETSI][11] = 58, [2][0][RTW89_MKK][11] = 58, @@ -46708,6 +48664,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][11] = 58, [2][0][RTW89_CHILE][11] = 48, [2][0][RTW89_QATAR][11] = 58, + [2][0][RTW89_THAILAND][11] = 58, [2][0][RTW89_FCC][12] = 16, [2][0][RTW89_ETSI][12] = 58, [2][0][RTW89_MKK][12] = 58, @@ -46720,6 +48677,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][12] = 58, [2][0][RTW89_CHILE][12] = 16, [2][0][RTW89_QATAR][12] = 58, + [2][0][RTW89_THAILAND][12] = 58, [2][0][RTW89_FCC][13] = 127, [2][0][RTW89_ETSI][13] = 127, [2][0][RTW89_MKK][13] = 127, @@ -46732,6 +48690,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][13] = 127, [2][0][RTW89_CHILE][13] = 127, [2][0][RTW89_QATAR][13] = 127, + [2][0][RTW89_THAILAND][13] = 127, [2][1][RTW89_FCC][0] = 64, [2][1][RTW89_ETSI][0] = 46, [2][1][RTW89_MKK][0] = 46, @@ -46744,6 +48703,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][0] = 46, [2][1][RTW89_CHILE][0] = 64, [2][1][RTW89_QATAR][0] = 46, + [2][1][RTW89_THAILAND][0] = 46, [2][1][RTW89_FCC][1] = 64, [2][1][RTW89_ETSI][1] = 46, [2][1][RTW89_MKK][1] = 46, @@ -46756,6 +48716,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][1] = 46, [2][1][RTW89_CHILE][1] = 44, [2][1][RTW89_QATAR][1] = 46, + [2][1][RTW89_THAILAND][1] = 46, [2][1][RTW89_FCC][2] = 68, [2][1][RTW89_ETSI][2] = 46, [2][1][RTW89_MKK][2] = 46, @@ -46768,6 +48729,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][2] = 46, [2][1][RTW89_CHILE][2] = 44, [2][1][RTW89_QATAR][2] = 46, + [2][1][RTW89_THAILAND][2] = 46, [2][1][RTW89_FCC][3] = 72, [2][1][RTW89_ETSI][3] = 46, [2][1][RTW89_MKK][3] = 46, @@ -46780,6 +48742,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][3] = 46, [2][1][RTW89_CHILE][3] = 44, [2][1][RTW89_QATAR][3] = 46, + [2][1][RTW89_THAILAND][3] = 46, [2][1][RTW89_FCC][4] = 74, [2][1][RTW89_ETSI][4] = 46, [2][1][RTW89_MKK][4] = 46, @@ -46792,6 +48755,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][4] = 46, [2][1][RTW89_CHILE][4] = 44, [2][1][RTW89_QATAR][4] = 46, + [2][1][RTW89_THAILAND][4] = 46, [2][1][RTW89_FCC][5] = 82, [2][1][RTW89_ETSI][5] = 46, [2][1][RTW89_MKK][5] = 46, @@ -46804,6 +48768,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][5] = 46, [2][1][RTW89_CHILE][5] = 78, [2][1][RTW89_QATAR][5] = 46, + [2][1][RTW89_THAILAND][5] = 46, [2][1][RTW89_FCC][6] = 72, [2][1][RTW89_ETSI][6] = 44, [2][1][RTW89_MKK][6] = 46, @@ -46816,6 +48781,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][6] = 44, [2][1][RTW89_CHILE][6] = 42, [2][1][RTW89_QATAR][6] = 44, + [2][1][RTW89_THAILAND][6] = 44, [2][1][RTW89_FCC][7] = 72, [2][1][RTW89_ETSI][7] = 46, [2][1][RTW89_MKK][7] = 46, @@ -46828,6 +48794,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][7] = 46, [2][1][RTW89_CHILE][7] = 42, [2][1][RTW89_QATAR][7] = 46, + [2][1][RTW89_THAILAND][7] = 46, [2][1][RTW89_FCC][8] = 68, [2][1][RTW89_ETSI][8] = 46, [2][1][RTW89_MKK][8] = 46, @@ -46840,6 +48807,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][8] = 46, [2][1][RTW89_CHILE][8] = 42, [2][1][RTW89_QATAR][8] = 46, + [2][1][RTW89_THAILAND][8] = 46, [2][1][RTW89_FCC][9] = 64, [2][1][RTW89_ETSI][9] = 46, [2][1][RTW89_MKK][9] = 46, @@ -46852,6 +48820,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][9] = 46, [2][1][RTW89_CHILE][9] = 42, [2][1][RTW89_QATAR][9] = 46, + [2][1][RTW89_THAILAND][9] = 46, [2][1][RTW89_FCC][10] = 64, [2][1][RTW89_ETSI][10] = 46, [2][1][RTW89_MKK][10] = 46, @@ -46864,6 +48833,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][10] = 46, [2][1][RTW89_CHILE][10] = 64, [2][1][RTW89_QATAR][10] = 46, + [2][1][RTW89_THAILAND][10] = 46, [2][1][RTW89_FCC][11] = 46, [2][1][RTW89_ETSI][11] = 46, [2][1][RTW89_MKK][11] = 46, @@ -46876,6 +48846,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][11] = 46, [2][1][RTW89_CHILE][11] = 46, [2][1][RTW89_QATAR][11] = 46, + [2][1][RTW89_THAILAND][11] = 46, [2][1][RTW89_FCC][12] = 6, [2][1][RTW89_ETSI][12] = 44, [2][1][RTW89_MKK][12] = 46, @@ -46888,6 +48859,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][12] = 44, [2][1][RTW89_CHILE][12] = 6, [2][1][RTW89_QATAR][12] = 44, + [2][1][RTW89_THAILAND][12] = 44, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, [2][1][RTW89_MKK][13] = 127, @@ -46900,6 +48872,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][13] = 127, [2][1][RTW89_CHILE][13] = 127, [2][1][RTW89_QATAR][13] = 127, + [2][1][RTW89_THAILAND][13] = 127, }; static @@ -47085,6 +49058,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][0] = 22, [0][0][RTW89_CHILE][0] = 50, [0][0][RTW89_QATAR][0] = 30, + [0][0][RTW89_THAILAND][0] = 30, [0][0][RTW89_FCC][2] = 50, [0][0][RTW89_ETSI][2] = 30, [0][0][RTW89_MKK][2] = 36, @@ -47097,6 +49071,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][2] = 22, [0][0][RTW89_CHILE][2] = 50, [0][0][RTW89_QATAR][2] = 30, + [0][0][RTW89_THAILAND][2] = 30, [0][0][RTW89_FCC][4] = 50, [0][0][RTW89_ETSI][4] = 30, [0][0][RTW89_MKK][4] = 22, @@ -47109,6 +49084,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][4] = 22, [0][0][RTW89_CHILE][4] = 50, [0][0][RTW89_QATAR][4] = 30, + [0][0][RTW89_THAILAND][4] = 30, [0][0][RTW89_FCC][6] = 50, [0][0][RTW89_ETSI][6] = 30, [0][0][RTW89_MKK][6] = 22, @@ -47121,6 +49097,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][6] = 22, [0][0][RTW89_CHILE][6] = 50, [0][0][RTW89_QATAR][6] = 30, + [0][0][RTW89_THAILAND][6] = 30, [0][0][RTW89_FCC][8] = 52, [0][0][RTW89_ETSI][8] = 28, [0][0][RTW89_MKK][8] = 18, @@ -47133,6 +49110,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][8] = 22, [0][0][RTW89_CHILE][8] = 52, [0][0][RTW89_QATAR][8] = 28, + [0][0][RTW89_THAILAND][8] = 28, [0][0][RTW89_FCC][10] = 52, [0][0][RTW89_ETSI][10] = 28, [0][0][RTW89_MKK][10] = 18, @@ -47145,6 +49123,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][10] = 22, [0][0][RTW89_CHILE][10] = 52, [0][0][RTW89_QATAR][10] = 28, + [0][0][RTW89_THAILAND][10] = 28, [0][0][RTW89_FCC][12] = 52, [0][0][RTW89_ETSI][12] = 28, [0][0][RTW89_MKK][12] = 34, @@ -47157,6 +49136,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][12] = 22, [0][0][RTW89_CHILE][12] = 52, [0][0][RTW89_QATAR][12] = 28, + [0][0][RTW89_THAILAND][12] = 28, [0][0][RTW89_FCC][14] = 52, [0][0][RTW89_ETSI][14] = 28, [0][0][RTW89_MKK][14] = 34, @@ -47169,6 +49149,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][14] = 22, [0][0][RTW89_CHILE][14] = 52, [0][0][RTW89_QATAR][14] = 28, + [0][0][RTW89_THAILAND][14] = 28, [0][0][RTW89_FCC][15] = 52, [0][0][RTW89_ETSI][15] = 30, [0][0][RTW89_MKK][15] = 56, @@ -47181,6 +49162,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][15] = 22, [0][0][RTW89_CHILE][15] = 52, [0][0][RTW89_QATAR][15] = 30, + [0][0][RTW89_THAILAND][15] = 30, [0][0][RTW89_FCC][17] = 52, [0][0][RTW89_ETSI][17] = 30, [0][0][RTW89_MKK][17] = 58, @@ -47193,6 +49175,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][17] = 22, [0][0][RTW89_CHILE][17] = 52, [0][0][RTW89_QATAR][17] = 30, + [0][0][RTW89_THAILAND][17] = 30, [0][0][RTW89_FCC][19] = 52, [0][0][RTW89_ETSI][19] = 30, [0][0][RTW89_MKK][19] = 58, @@ -47205,6 +49188,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][19] = 22, [0][0][RTW89_CHILE][19] = 52, [0][0][RTW89_QATAR][19] = 30, + [0][0][RTW89_THAILAND][19] = 30, [0][0][RTW89_FCC][21] = 52, [0][0][RTW89_ETSI][21] = 30, [0][0][RTW89_MKK][21] = 58, @@ -47217,6 +49201,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][21] = 22, [0][0][RTW89_CHILE][21] = 52, [0][0][RTW89_QATAR][21] = 30, + [0][0][RTW89_THAILAND][21] = 30, [0][0][RTW89_FCC][23] = 52, [0][0][RTW89_ETSI][23] = 30, [0][0][RTW89_MKK][23] = 58, @@ -47229,6 +49214,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][23] = 22, [0][0][RTW89_CHILE][23] = 52, [0][0][RTW89_QATAR][23] = 30, + [0][0][RTW89_THAILAND][23] = 30, [0][0][RTW89_FCC][25] = 52, [0][0][RTW89_ETSI][25] = 30, [0][0][RTW89_MKK][25] = 58, @@ -47241,6 +49227,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][25] = 22, [0][0][RTW89_CHILE][25] = 52, [0][0][RTW89_QATAR][25] = 30, + [0][0][RTW89_THAILAND][25] = 30, [0][0][RTW89_FCC][27] = 52, [0][0][RTW89_ETSI][27] = 30, [0][0][RTW89_MKK][27] = 58, @@ -47253,6 +49240,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][27] = 22, [0][0][RTW89_CHILE][27] = 52, [0][0][RTW89_QATAR][27] = 30, + [0][0][RTW89_THAILAND][27] = 30, [0][0][RTW89_FCC][29] = 52, [0][0][RTW89_ETSI][29] = 30, 
[0][0][RTW89_MKK][29] = 58, @@ -47265,6 +49253,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][29] = 22, [0][0][RTW89_CHILE][29] = 52, [0][0][RTW89_QATAR][29] = 30, + [0][0][RTW89_THAILAND][29] = 30, [0][0][RTW89_FCC][31] = 52, [0][0][RTW89_ETSI][31] = 30, [0][0][RTW89_MKK][31] = 58, @@ -47277,6 +49266,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][31] = 22, [0][0][RTW89_CHILE][31] = 52, [0][0][RTW89_QATAR][31] = 30, + [0][0][RTW89_THAILAND][31] = 30, [0][0][RTW89_FCC][33] = 44, [0][0][RTW89_ETSI][33] = 30, [0][0][RTW89_MKK][33] = 58, @@ -47289,6 +49279,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][33] = 22, [0][0][RTW89_CHILE][33] = 44, [0][0][RTW89_QATAR][33] = 30, + [0][0][RTW89_THAILAND][33] = 30, [0][0][RTW89_FCC][35] = 44, [0][0][RTW89_ETSI][35] = 30, [0][0][RTW89_MKK][35] = 58, @@ -47301,6 +49292,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][35] = 22, [0][0][RTW89_CHILE][35] = 44, [0][0][RTW89_QATAR][35] = 30, + [0][0][RTW89_THAILAND][35] = 30, [0][0][RTW89_FCC][37] = 52, [0][0][RTW89_ETSI][37] = 127, [0][0][RTW89_MKK][37] = 58, @@ -47313,6 +49305,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][37] = 127, [0][0][RTW89_CHILE][37] = 52, [0][0][RTW89_QATAR][37] = 127, + [0][0][RTW89_THAILAND][37] = 127, [0][0][RTW89_FCC][38] = 64, [0][0][RTW89_ETSI][38] = 28, [0][0][RTW89_MKK][38] = 127, @@ -47325,6 +49318,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][38] = 26, [0][0][RTW89_CHILE][38] = 64, [0][0][RTW89_QATAR][38] = 26, + [0][0][RTW89_THAILAND][38] = 28, [0][0][RTW89_FCC][40] = 64, [0][0][RTW89_ETSI][40] = 28, [0][0][RTW89_MKK][40] = 127, @@ -47337,6 +49331,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][40] = 26, [0][0][RTW89_CHILE][40] = 64, [0][0][RTW89_QATAR][40] = 26, + [0][0][RTW89_THAILAND][40] = 28, [0][0][RTW89_FCC][42] = 60, [0][0][RTW89_ETSI][42] = 28, [0][0][RTW89_MKK][42] = 127, @@ -47349,6 +49344,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][42] = 26, [0][0][RTW89_CHILE][42] = 60, [0][0][RTW89_QATAR][42] = 26, + [0][0][RTW89_THAILAND][42] = 28, [0][0][RTW89_FCC][44] = 60, [0][0][RTW89_ETSI][44] = 28, [0][0][RTW89_MKK][44] = 127, @@ -47361,6 +49357,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][44] = 26, [0][0][RTW89_CHILE][44] = 60, [0][0][RTW89_QATAR][44] = 26, + [0][0][RTW89_THAILAND][44] = 28, [0][0][RTW89_FCC][46] = 60, [0][0][RTW89_ETSI][46] = 28, [0][0][RTW89_MKK][46] = 127, @@ -47373,6 +49370,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][46] = 26, [0][0][RTW89_CHILE][46] = 60, [0][0][RTW89_QATAR][46] = 26, + [0][0][RTW89_THAILAND][46] = 28, [0][0][RTW89_FCC][48] = 46, [0][0][RTW89_ETSI][48] = 127, [0][0][RTW89_MKK][48] = 127, @@ -47385,6 +49383,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][48] = 127, [0][0][RTW89_CHILE][48] = 127, [0][0][RTW89_QATAR][48] = 127, + [0][0][RTW89_THAILAND][48] = 127, [0][0][RTW89_FCC][50] = 44, [0][0][RTW89_ETSI][50] = 127, [0][0][RTW89_MKK][50] = 127, @@ -47397,6 +49396,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][50] = 127, [0][0][RTW89_CHILE][50] = 127, [0][0][RTW89_QATAR][50] = 127, + 
[0][0][RTW89_THAILAND][50] = 127, [0][0][RTW89_FCC][52] = 34, [0][0][RTW89_ETSI][52] = 127, [0][0][RTW89_MKK][52] = 127, @@ -47409,6 +49409,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][52] = 127, [0][0][RTW89_CHILE][52] = 127, [0][0][RTW89_QATAR][52] = 127, + [0][0][RTW89_THAILAND][52] = 127, [0][1][RTW89_FCC][0] = 30, [0][1][RTW89_ETSI][0] = 18, [0][1][RTW89_MKK][0] = 20, @@ -47421,6 +49422,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][0] = 10, [0][1][RTW89_CHILE][0] = 30, [0][1][RTW89_QATAR][0] = 18, + [0][1][RTW89_THAILAND][0] = 18, [0][1][RTW89_FCC][2] = 32, [0][1][RTW89_ETSI][2] = 18, [0][1][RTW89_MKK][2] = 20, @@ -47433,6 +49435,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][2] = 10, [0][1][RTW89_CHILE][2] = 32, [0][1][RTW89_QATAR][2] = 18, + [0][1][RTW89_THAILAND][2] = 18, [0][1][RTW89_FCC][4] = 30, [0][1][RTW89_ETSI][4] = 18, [0][1][RTW89_MKK][4] = 8, @@ -47445,6 +49448,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][4] = 10, [0][1][RTW89_CHILE][4] = 30, [0][1][RTW89_QATAR][4] = 18, + [0][1][RTW89_THAILAND][4] = 18, [0][1][RTW89_FCC][6] = 30, [0][1][RTW89_ETSI][6] = 18, [0][1][RTW89_MKK][6] = 8, @@ -47457,6 +49461,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][6] = 10, [0][1][RTW89_CHILE][6] = 30, [0][1][RTW89_QATAR][6] = 18, + [0][1][RTW89_THAILAND][6] = 18, [0][1][RTW89_FCC][8] = 30, [0][1][RTW89_ETSI][8] = 16, [0][1][RTW89_MKK][8] = 20, @@ -47469,6 +49474,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][8] = 10, [0][1][RTW89_CHILE][8] = 30, [0][1][RTW89_QATAR][8] = 16, + [0][1][RTW89_THAILAND][8] = 16, [0][1][RTW89_FCC][10] = 30, [0][1][RTW89_ETSI][10] = 16, [0][1][RTW89_MKK][10] = 20, @@ -47481,6 +49487,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][10] = 10, [0][1][RTW89_CHILE][10] = 30, [0][1][RTW89_QATAR][10] = 16, + [0][1][RTW89_THAILAND][10] = 16, [0][1][RTW89_FCC][12] = 30, [0][1][RTW89_ETSI][12] = 16, [0][1][RTW89_MKK][12] = 34, @@ -47493,6 +49500,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][12] = 10, [0][1][RTW89_CHILE][12] = 30, [0][1][RTW89_QATAR][12] = 16, + [0][1][RTW89_THAILAND][12] = 16, [0][1][RTW89_FCC][14] = 30, [0][1][RTW89_ETSI][14] = 16, [0][1][RTW89_MKK][14] = 34, @@ -47505,6 +49513,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][14] = 10, [0][1][RTW89_CHILE][14] = 30, [0][1][RTW89_QATAR][14] = 16, + [0][1][RTW89_THAILAND][14] = 16, [0][1][RTW89_FCC][15] = 32, [0][1][RTW89_ETSI][15] = 18, [0][1][RTW89_MKK][15] = 44, @@ -47517,6 +49526,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][15] = 10, [0][1][RTW89_CHILE][15] = 32, [0][1][RTW89_QATAR][15] = 18, + [0][1][RTW89_THAILAND][15] = 18, [0][1][RTW89_FCC][17] = 32, [0][1][RTW89_ETSI][17] = 18, [0][1][RTW89_MKK][17] = 44, @@ -47529,6 +49539,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][17] = 10, [0][1][RTW89_CHILE][17] = 32, [0][1][RTW89_QATAR][17] = 18, + [0][1][RTW89_THAILAND][17] = 18, [0][1][RTW89_FCC][19] = 32, [0][1][RTW89_ETSI][19] = 18, [0][1][RTW89_MKK][19] = 44, @@ -47541,6 +49552,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][19] = 10, 
[0][1][RTW89_CHILE][19] = 32, [0][1][RTW89_QATAR][19] = 18, + [0][1][RTW89_THAILAND][19] = 18, [0][1][RTW89_FCC][21] = 32, [0][1][RTW89_ETSI][21] = 18, [0][1][RTW89_MKK][21] = 44, @@ -47553,6 +49565,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][21] = 10, [0][1][RTW89_CHILE][21] = 32, [0][1][RTW89_QATAR][21] = 18, + [0][1][RTW89_THAILAND][21] = 18, [0][1][RTW89_FCC][23] = 32, [0][1][RTW89_ETSI][23] = 18, [0][1][RTW89_MKK][23] = 44, @@ -47565,6 +49578,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][23] = 10, [0][1][RTW89_CHILE][23] = 32, [0][1][RTW89_QATAR][23] = 18, + [0][1][RTW89_THAILAND][23] = 18, [0][1][RTW89_FCC][25] = 32, [0][1][RTW89_ETSI][25] = 18, [0][1][RTW89_MKK][25] = 44, @@ -47577,6 +49591,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][25] = 10, [0][1][RTW89_CHILE][25] = 32, [0][1][RTW89_QATAR][25] = 18, + [0][1][RTW89_THAILAND][25] = 18, [0][1][RTW89_FCC][27] = 32, [0][1][RTW89_ETSI][27] = 16, [0][1][RTW89_MKK][27] = 44, @@ -47589,6 +49604,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][27] = 10, [0][1][RTW89_CHILE][27] = 32, [0][1][RTW89_QATAR][27] = 16, + [0][1][RTW89_THAILAND][27] = 16, [0][1][RTW89_FCC][29] = 32, [0][1][RTW89_ETSI][29] = 16, [0][1][RTW89_MKK][29] = 44, @@ -47601,6 +49617,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][29] = 10, [0][1][RTW89_CHILE][29] = 32, [0][1][RTW89_QATAR][29] = 16, + [0][1][RTW89_THAILAND][29] = 16, [0][1][RTW89_FCC][31] = 32, [0][1][RTW89_ETSI][31] = 16, [0][1][RTW89_MKK][31] = 44, @@ -47613,6 +49630,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][31] = 10, [0][1][RTW89_CHILE][31] = 32, [0][1][RTW89_QATAR][31] = 16, + [0][1][RTW89_THAILAND][31] = 16, [0][1][RTW89_FCC][33] = 30, [0][1][RTW89_ETSI][33] = 16, [0][1][RTW89_MKK][33] = 44, @@ -47625,6 +49643,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][33] = 10, [0][1][RTW89_CHILE][33] = 30, [0][1][RTW89_QATAR][33] = 16, + [0][1][RTW89_THAILAND][33] = 16, [0][1][RTW89_FCC][35] = 30, [0][1][RTW89_ETSI][35] = 16, [0][1][RTW89_MKK][35] = 44, @@ -47637,6 +49656,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][35] = 10, [0][1][RTW89_CHILE][35] = 30, [0][1][RTW89_QATAR][35] = 16, + [0][1][RTW89_THAILAND][35] = 16, [0][1][RTW89_FCC][37] = 34, [0][1][RTW89_ETSI][37] = 127, [0][1][RTW89_MKK][37] = 44, @@ -47649,6 +49669,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][37] = 127, [0][1][RTW89_CHILE][37] = 34, [0][1][RTW89_QATAR][37] = 127, + [0][1][RTW89_THAILAND][37] = 127, [0][1][RTW89_FCC][38] = 62, [0][1][RTW89_ETSI][38] = 16, [0][1][RTW89_MKK][38] = 127, @@ -47661,6 +49682,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][38] = 14, [0][1][RTW89_CHILE][38] = 62, [0][1][RTW89_QATAR][38] = 14, + [0][1][RTW89_THAILAND][38] = 16, [0][1][RTW89_FCC][40] = 62, [0][1][RTW89_ETSI][40] = 16, [0][1][RTW89_MKK][40] = 127, @@ -47673,6 +49695,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][40] = 14, [0][1][RTW89_CHILE][40] = 62, [0][1][RTW89_QATAR][40] = 14, + [0][1][RTW89_THAILAND][40] = 16, [0][1][RTW89_FCC][42] = 58, [0][1][RTW89_ETSI][42] = 16, [0][1][RTW89_MKK][42] = 127, @@ -47685,6 +49708,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][42] = 14, [0][1][RTW89_CHILE][42] = 58, [0][1][RTW89_QATAR][42] = 14, + [0][1][RTW89_THAILAND][42] = 16, [0][1][RTW89_FCC][44] = 56, [0][1][RTW89_ETSI][44] = 16, [0][1][RTW89_MKK][44] = 127, @@ -47697,6 +49721,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][44] = 14, [0][1][RTW89_CHILE][44] = 56, [0][1][RTW89_QATAR][44] = 14, + [0][1][RTW89_THAILAND][44] = 16, [0][1][RTW89_FCC][46] = 56, [0][1][RTW89_ETSI][46] = 16, [0][1][RTW89_MKK][46] = 127, @@ -47709,6 +49734,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][46] = 14, [0][1][RTW89_CHILE][46] = 56, [0][1][RTW89_QATAR][46] = 14, + [0][1][RTW89_THAILAND][46] = 16, [0][1][RTW89_FCC][48] = 20, [0][1][RTW89_ETSI][48] = 127, [0][1][RTW89_MKK][48] = 127, @@ -47721,6 +49747,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][48] = 127, [0][1][RTW89_CHILE][48] = 127, [0][1][RTW89_QATAR][48] = 127, + [0][1][RTW89_THAILAND][48] = 127, [0][1][RTW89_FCC][50] = 20, [0][1][RTW89_ETSI][50] = 127, [0][1][RTW89_MKK][50] = 127, @@ -47733,6 +49760,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][50] = 127, [0][1][RTW89_CHILE][50] = 127, [0][1][RTW89_QATAR][50] = 127, + [0][1][RTW89_THAILAND][50] = 127, [0][1][RTW89_FCC][52] = 8, [0][1][RTW89_ETSI][52] = 127, [0][1][RTW89_MKK][52] = 127, @@ -47745,6 +49773,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][52] = 127, [0][1][RTW89_CHILE][52] = 127, [0][1][RTW89_QATAR][52] = 127, + [0][1][RTW89_THAILAND][52] = 127, [1][0][RTW89_FCC][0] = 62, [1][0][RTW89_ETSI][0] = 40, [1][0][RTW89_MKK][0] = 48, @@ -47757,6 +49786,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][0] = 32, [1][0][RTW89_CHILE][0] = 62, [1][0][RTW89_QATAR][0] = 40, + [1][0][RTW89_THAILAND][0] = 40, [1][0][RTW89_FCC][2] = 62, [1][0][RTW89_ETSI][2] = 40, [1][0][RTW89_MKK][2] = 48, @@ -47769,6 +49799,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][2] = 32, [1][0][RTW89_CHILE][2] = 62, [1][0][RTW89_QATAR][2] = 40, + [1][0][RTW89_THAILAND][2] = 40, [1][0][RTW89_FCC][4] = 64, [1][0][RTW89_ETSI][4] = 40, [1][0][RTW89_MKK][4] = 40, @@ -47781,6 +49812,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][4] = 32, [1][0][RTW89_CHILE][4] = 64, [1][0][RTW89_QATAR][4] = 40, + [1][0][RTW89_THAILAND][4] = 40, [1][0][RTW89_FCC][6] = 64, [1][0][RTW89_ETSI][6] = 40, [1][0][RTW89_MKK][6] = 40, @@ -47793,6 +49825,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][6] = 32, [1][0][RTW89_CHILE][6] = 64, [1][0][RTW89_QATAR][6] = 40, + [1][0][RTW89_THAILAND][6] = 40, [1][0][RTW89_FCC][8] = 62, [1][0][RTW89_ETSI][8] = 40, [1][0][RTW89_MKK][8] = 34, @@ -47805,6 +49838,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][8] = 32, [1][0][RTW89_CHILE][8] = 62, [1][0][RTW89_QATAR][8] = 40, + [1][0][RTW89_THAILAND][8] = 40, [1][0][RTW89_FCC][10] = 62, [1][0][RTW89_ETSI][10] = 40, [1][0][RTW89_MKK][10] = 34, @@ -47817,6 +49851,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][10] = 32, [1][0][RTW89_CHILE][10] = 62, [1][0][RTW89_QATAR][10] = 40, + [1][0][RTW89_THAILAND][10] = 40, [1][0][RTW89_FCC][12] = 62, [1][0][RTW89_ETSI][12] = 40, 
[1][0][RTW89_MKK][12] = 46, @@ -47829,6 +49864,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][12] = 32, [1][0][RTW89_CHILE][12] = 62, [1][0][RTW89_QATAR][12] = 40, + [1][0][RTW89_THAILAND][12] = 40, [1][0][RTW89_FCC][14] = 62, [1][0][RTW89_ETSI][14] = 40, [1][0][RTW89_MKK][14] = 46, @@ -47841,6 +49877,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][14] = 32, [1][0][RTW89_CHILE][14] = 62, [1][0][RTW89_QATAR][14] = 40, + [1][0][RTW89_THAILAND][14] = 40, [1][0][RTW89_FCC][15] = 62, [1][0][RTW89_ETSI][15] = 40, [1][0][RTW89_MKK][15] = 62, @@ -47853,6 +49890,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][15] = 32, [1][0][RTW89_CHILE][15] = 62, [1][0][RTW89_QATAR][15] = 40, + [1][0][RTW89_THAILAND][15] = 40, [1][0][RTW89_FCC][17] = 62, [1][0][RTW89_ETSI][17] = 40, [1][0][RTW89_MKK][17] = 68, @@ -47865,6 +49903,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][17] = 32, [1][0][RTW89_CHILE][17] = 62, [1][0][RTW89_QATAR][17] = 40, + [1][0][RTW89_THAILAND][17] = 40, [1][0][RTW89_FCC][19] = 64, [1][0][RTW89_ETSI][19] = 40, [1][0][RTW89_MKK][19] = 68, @@ -47877,6 +49916,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][19] = 32, [1][0][RTW89_CHILE][19] = 64, [1][0][RTW89_QATAR][19] = 40, + [1][0][RTW89_THAILAND][19] = 40, [1][0][RTW89_FCC][21] = 64, [1][0][RTW89_ETSI][21] = 40, [1][0][RTW89_MKK][21] = 68, @@ -47889,6 +49929,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][21] = 32, [1][0][RTW89_CHILE][21] = 64, [1][0][RTW89_QATAR][21] = 40, + [1][0][RTW89_THAILAND][21] = 40, [1][0][RTW89_FCC][23] = 64, [1][0][RTW89_ETSI][23] = 40, [1][0][RTW89_MKK][23] = 68, @@ -47901,6 +49942,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][23] = 32, [1][0][RTW89_CHILE][23] = 64, [1][0][RTW89_QATAR][23] = 40, + [1][0][RTW89_THAILAND][23] = 40, [1][0][RTW89_FCC][25] = 64, [1][0][RTW89_ETSI][25] = 40, [1][0][RTW89_MKK][25] = 68, @@ -47913,6 +49955,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][25] = 32, [1][0][RTW89_CHILE][25] = 64, [1][0][RTW89_QATAR][25] = 40, + [1][0][RTW89_THAILAND][25] = 40, [1][0][RTW89_FCC][27] = 64, [1][0][RTW89_ETSI][27] = 42, [1][0][RTW89_MKK][27] = 68, @@ -47925,6 +49968,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][27] = 32, [1][0][RTW89_CHILE][27] = 64, [1][0][RTW89_QATAR][27] = 42, + [1][0][RTW89_THAILAND][27] = 42, [1][0][RTW89_FCC][29] = 64, [1][0][RTW89_ETSI][29] = 42, [1][0][RTW89_MKK][29] = 68, @@ -47937,6 +49981,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][29] = 32, [1][0][RTW89_CHILE][29] = 64, [1][0][RTW89_QATAR][29] = 42, + [1][0][RTW89_THAILAND][29] = 42, [1][0][RTW89_FCC][31] = 64, [1][0][RTW89_ETSI][31] = 42, [1][0][RTW89_MKK][31] = 68, @@ -47949,6 +49994,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][31] = 32, [1][0][RTW89_CHILE][31] = 64, [1][0][RTW89_QATAR][31] = 42, + [1][0][RTW89_THAILAND][31] = 42, [1][0][RTW89_FCC][33] = 56, [1][0][RTW89_ETSI][33] = 42, [1][0][RTW89_MKK][33] = 68, @@ -47961,6 +50007,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][33] = 32, [1][0][RTW89_CHILE][33] = 56, [1][0][RTW89_QATAR][33] = 42, + 
[1][0][RTW89_THAILAND][33] = 42, [1][0][RTW89_FCC][35] = 56, [1][0][RTW89_ETSI][35] = 42, [1][0][RTW89_MKK][35] = 68, @@ -47973,6 +50020,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][35] = 32, [1][0][RTW89_CHILE][35] = 56, [1][0][RTW89_QATAR][35] = 42, + [1][0][RTW89_THAILAND][35] = 42, [1][0][RTW89_FCC][37] = 66, [1][0][RTW89_ETSI][37] = 127, [1][0][RTW89_MKK][37] = 68, @@ -47985,66 +50033,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][37] = 127, [1][0][RTW89_CHILE][37] = 66, [1][0][RTW89_QATAR][37] = 127, + [1][0][RTW89_THAILAND][37] = 127, [1][0][RTW89_FCC][38] = 76, [1][0][RTW89_ETSI][38] = 28, [1][0][RTW89_MKK][38] = 127, [1][0][RTW89_IC][38] = 76, [1][0][RTW89_KCC][38] = 54, [1][0][RTW89_ACMA][38] = 76, - [1][0][RTW89_CN][38] = 66, + [1][0][RTW89_CN][38] = 56, [1][0][RTW89_UK][38] = 44, [1][0][RTW89_MEXICO][38] = 76, [1][0][RTW89_UKRAINE][38] = 26, [1][0][RTW89_CHILE][38] = 76, [1][0][RTW89_QATAR][38] = 26, + [1][0][RTW89_THAILAND][38] = 28, [1][0][RTW89_FCC][40] = 76, [1][0][RTW89_ETSI][40] = 28, [1][0][RTW89_MKK][40] = 127, [1][0][RTW89_IC][40] = 76, [1][0][RTW89_KCC][40] = 54, [1][0][RTW89_ACMA][40] = 76, - [1][0][RTW89_CN][40] = 66, + [1][0][RTW89_CN][40] = 56, [1][0][RTW89_UK][40] = 44, [1][0][RTW89_MEXICO][40] = 76, [1][0][RTW89_UKRAINE][40] = 26, [1][0][RTW89_CHILE][40] = 76, [1][0][RTW89_QATAR][40] = 26, + [1][0][RTW89_THAILAND][40] = 28, [1][0][RTW89_FCC][42] = 68, [1][0][RTW89_ETSI][42] = 28, [1][0][RTW89_MKK][42] = 127, [1][0][RTW89_IC][42] = 68, [1][0][RTW89_KCC][42] = 54, [1][0][RTW89_ACMA][42] = 68, - [1][0][RTW89_CN][42] = 66, + [1][0][RTW89_CN][42] = 56, [1][0][RTW89_UK][42] = 44, [1][0][RTW89_MEXICO][42] = 68, [1][0][RTW89_UKRAINE][42] = 26, [1][0][RTW89_CHILE][42] = 68, [1][0][RTW89_QATAR][42] = 26, + [1][0][RTW89_THAILAND][42] = 28, [1][0][RTW89_FCC][44] = 70, [1][0][RTW89_ETSI][44] = 28, [1][0][RTW89_MKK][44] = 127, [1][0][RTW89_IC][44] = 70, [1][0][RTW89_KCC][44] = 54, [1][0][RTW89_ACMA][44] = 70, - [1][0][RTW89_CN][44] = 66, + [1][0][RTW89_CN][44] = 56, [1][0][RTW89_UK][44] = 42, [1][0][RTW89_MEXICO][44] = 70, [1][0][RTW89_UKRAINE][44] = 26, [1][0][RTW89_CHILE][44] = 70, [1][0][RTW89_QATAR][44] = 26, + [1][0][RTW89_THAILAND][44] = 28, [1][0][RTW89_FCC][46] = 70, [1][0][RTW89_ETSI][46] = 28, [1][0][RTW89_MKK][46] = 127, [1][0][RTW89_IC][46] = 70, [1][0][RTW89_KCC][46] = 54, [1][0][RTW89_ACMA][46] = 70, - [1][0][RTW89_CN][46] = 66, + [1][0][RTW89_CN][46] = 56, [1][0][RTW89_UK][46] = 42, [1][0][RTW89_MEXICO][46] = 70, [1][0][RTW89_UKRAINE][46] = 26, [1][0][RTW89_CHILE][46] = 70, [1][0][RTW89_QATAR][46] = 26, + [1][0][RTW89_THAILAND][46] = 28, [1][0][RTW89_FCC][48] = 56, [1][0][RTW89_ETSI][48] = 127, [1][0][RTW89_MKK][48] = 127, @@ -48057,6 +50111,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][48] = 127, [1][0][RTW89_CHILE][48] = 127, [1][0][RTW89_QATAR][48] = 127, + [1][0][RTW89_THAILAND][48] = 127, [1][0][RTW89_FCC][50] = 58, [1][0][RTW89_ETSI][50] = 127, [1][0][RTW89_MKK][50] = 127, @@ -48069,6 +50124,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][50] = 127, [1][0][RTW89_CHILE][50] = 127, [1][0][RTW89_QATAR][50] = 127, + [1][0][RTW89_THAILAND][50] = 127, [1][0][RTW89_FCC][52] = 56, [1][0][RTW89_ETSI][52] = 127, [1][0][RTW89_MKK][52] = 127, @@ -48081,6 +50137,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][52] = 127, 
[1][0][RTW89_CHILE][52] = 127, [1][0][RTW89_QATAR][52] = 127, + [1][0][RTW89_THAILAND][52] = 127, [1][1][RTW89_FCC][0] = 44, [1][1][RTW89_ETSI][0] = 30, [1][1][RTW89_MKK][0] = 34, @@ -48093,6 +50150,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][0] = 20, [1][1][RTW89_CHILE][0] = 44, [1][1][RTW89_QATAR][0] = 30, + [1][1][RTW89_THAILAND][0] = 30, [1][1][RTW89_FCC][2] = 44, [1][1][RTW89_ETSI][2] = 30, [1][1][RTW89_MKK][2] = 34, @@ -48105,6 +50163,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][2] = 20, [1][1][RTW89_CHILE][2] = 44, [1][1][RTW89_QATAR][2] = 30, + [1][1][RTW89_THAILAND][2] = 30, [1][1][RTW89_FCC][4] = 46, [1][1][RTW89_ETSI][4] = 30, [1][1][RTW89_MKK][4] = 26, @@ -48117,6 +50176,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][4] = 20, [1][1][RTW89_CHILE][4] = 46, [1][1][RTW89_QATAR][4] = 30, + [1][1][RTW89_THAILAND][4] = 30, [1][1][RTW89_FCC][6] = 46, [1][1][RTW89_ETSI][6] = 30, [1][1][RTW89_MKK][6] = 26, @@ -48129,6 +50189,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][6] = 20, [1][1][RTW89_CHILE][6] = 46, [1][1][RTW89_QATAR][6] = 30, + [1][1][RTW89_THAILAND][6] = 30, [1][1][RTW89_FCC][8] = 44, [1][1][RTW89_ETSI][8] = 30, [1][1][RTW89_MKK][8] = 20, @@ -48141,6 +50202,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][8] = 20, [1][1][RTW89_CHILE][8] = 44, [1][1][RTW89_QATAR][8] = 30, + [1][1][RTW89_THAILAND][8] = 30, [1][1][RTW89_FCC][10] = 44, [1][1][RTW89_ETSI][10] = 30, [1][1][RTW89_MKK][10] = 20, @@ -48153,6 +50215,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][10] = 20, [1][1][RTW89_CHILE][10] = 44, [1][1][RTW89_QATAR][10] = 30, + [1][1][RTW89_THAILAND][10] = 30, [1][1][RTW89_FCC][12] = 44, [1][1][RTW89_ETSI][12] = 30, [1][1][RTW89_MKK][12] = 34, @@ -48165,6 +50228,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][12] = 20, [1][1][RTW89_CHILE][12] = 44, [1][1][RTW89_QATAR][12] = 30, + [1][1][RTW89_THAILAND][12] = 30, [1][1][RTW89_FCC][14] = 44, [1][1][RTW89_ETSI][14] = 30, [1][1][RTW89_MKK][14] = 34, @@ -48177,6 +50241,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][14] = 20, [1][1][RTW89_CHILE][14] = 44, [1][1][RTW89_QATAR][14] = 30, + [1][1][RTW89_THAILAND][14] = 30, [1][1][RTW89_FCC][15] = 44, [1][1][RTW89_ETSI][15] = 28, [1][1][RTW89_MKK][15] = 56, @@ -48189,6 +50254,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][15] = 20, [1][1][RTW89_CHILE][15] = 44, [1][1][RTW89_QATAR][15] = 28, + [1][1][RTW89_THAILAND][15] = 28, [1][1][RTW89_FCC][17] = 44, [1][1][RTW89_ETSI][17] = 28, [1][1][RTW89_MKK][17] = 58, @@ -48201,6 +50267,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][17] = 20, [1][1][RTW89_CHILE][17] = 44, [1][1][RTW89_QATAR][17] = 28, + [1][1][RTW89_THAILAND][17] = 28, [1][1][RTW89_FCC][19] = 44, [1][1][RTW89_ETSI][19] = 28, [1][1][RTW89_MKK][19] = 58, @@ -48213,6 +50280,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][19] = 20, [1][1][RTW89_CHILE][19] = 44, [1][1][RTW89_QATAR][19] = 28, + [1][1][RTW89_THAILAND][19] = 28, [1][1][RTW89_FCC][21] = 44, [1][1][RTW89_ETSI][21] = 28, [1][1][RTW89_MKK][21] = 58, @@ -48225,6 +50293,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][21] = 20, [1][1][RTW89_CHILE][21] = 44, [1][1][RTW89_QATAR][21] = 28, + [1][1][RTW89_THAILAND][21] = 28, [1][1][RTW89_FCC][23] = 44, [1][1][RTW89_ETSI][23] = 28, [1][1][RTW89_MKK][23] = 58, @@ -48237,6 +50306,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][23] = 20, [1][1][RTW89_CHILE][23] = 44, [1][1][RTW89_QATAR][23] = 28, + [1][1][RTW89_THAILAND][23] = 28, [1][1][RTW89_FCC][25] = 44, [1][1][RTW89_ETSI][25] = 28, [1][1][RTW89_MKK][25] = 58, @@ -48249,6 +50319,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][25] = 20, [1][1][RTW89_CHILE][25] = 44, [1][1][RTW89_QATAR][25] = 28, + [1][1][RTW89_THAILAND][25] = 28, [1][1][RTW89_FCC][27] = 44, [1][1][RTW89_ETSI][27] = 30, [1][1][RTW89_MKK][27] = 58, @@ -48261,6 +50332,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][27] = 20, [1][1][RTW89_CHILE][27] = 44, [1][1][RTW89_QATAR][27] = 30, + [1][1][RTW89_THAILAND][27] = 30, [1][1][RTW89_FCC][29] = 44, [1][1][RTW89_ETSI][29] = 30, [1][1][RTW89_MKK][29] = 58, @@ -48273,6 +50345,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][29] = 20, [1][1][RTW89_CHILE][29] = 44, [1][1][RTW89_QATAR][29] = 30, + [1][1][RTW89_THAILAND][29] = 30, [1][1][RTW89_FCC][31] = 44, [1][1][RTW89_ETSI][31] = 30, [1][1][RTW89_MKK][31] = 58, @@ -48285,6 +50358,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][31] = 20, [1][1][RTW89_CHILE][31] = 44, [1][1][RTW89_QATAR][31] = 30, + [1][1][RTW89_THAILAND][31] = 30, [1][1][RTW89_FCC][33] = 38, [1][1][RTW89_ETSI][33] = 30, [1][1][RTW89_MKK][33] = 58, @@ -48297,6 +50371,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][33] = 20, [1][1][RTW89_CHILE][33] = 38, [1][1][RTW89_QATAR][33] = 30, + [1][1][RTW89_THAILAND][33] = 30, [1][1][RTW89_FCC][35] = 38, [1][1][RTW89_ETSI][35] = 30, [1][1][RTW89_MKK][35] = 58, @@ -48309,6 +50384,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][35] = 20, [1][1][RTW89_CHILE][35] = 38, [1][1][RTW89_QATAR][35] = 30, + [1][1][RTW89_THAILAND][35] = 30, [1][1][RTW89_FCC][37] = 46, [1][1][RTW89_ETSI][37] = 127, [1][1][RTW89_MKK][37] = 58, @@ -48321,6 +50397,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][37] = 127, [1][1][RTW89_CHILE][37] = 46, [1][1][RTW89_QATAR][37] = 127, + [1][1][RTW89_THAILAND][37] = 127, [1][1][RTW89_FCC][38] = 74, [1][1][RTW89_ETSI][38] = 16, [1][1][RTW89_MKK][38] = 127, @@ -48333,6 +50410,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][38] = 14, [1][1][RTW89_CHILE][38] = 72, [1][1][RTW89_QATAR][38] = 14, + [1][1][RTW89_THAILAND][38] = 16, [1][1][RTW89_FCC][40] = 74, [1][1][RTW89_ETSI][40] = 16, [1][1][RTW89_MKK][40] = 127, @@ -48345,6 +50423,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][40] = 14, [1][1][RTW89_CHILE][40] = 72, [1][1][RTW89_QATAR][40] = 14, + [1][1][RTW89_THAILAND][40] = 16, [1][1][RTW89_FCC][42] = 74, [1][1][RTW89_ETSI][42] = 16, [1][1][RTW89_MKK][42] = 127, @@ -48357,6 +50436,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][42] = 14, [1][1][RTW89_CHILE][42] = 72, [1][1][RTW89_QATAR][42] = 14, + [1][1][RTW89_THAILAND][42] = 16, [1][1][RTW89_FCC][44] = 74, 
[1][1][RTW89_ETSI][44] = 16, [1][1][RTW89_MKK][44] = 127, @@ -48369,6 +50449,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][44] = 14, [1][1][RTW89_CHILE][44] = 72, [1][1][RTW89_QATAR][44] = 14, + [1][1][RTW89_THAILAND][44] = 16, [1][1][RTW89_FCC][46] = 74, [1][1][RTW89_ETSI][46] = 16, [1][1][RTW89_MKK][46] = 127, @@ -48381,6 +50462,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][46] = 14, [1][1][RTW89_CHILE][46] = 72, [1][1][RTW89_QATAR][46] = 14, + [1][1][RTW89_THAILAND][46] = 16, [1][1][RTW89_FCC][48] = 34, [1][1][RTW89_ETSI][48] = 127, [1][1][RTW89_MKK][48] = 127, @@ -48393,6 +50475,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][48] = 127, [1][1][RTW89_CHILE][48] = 127, [1][1][RTW89_QATAR][48] = 127, + [1][1][RTW89_THAILAND][48] = 127, [1][1][RTW89_FCC][50] = 34, [1][1][RTW89_ETSI][50] = 127, [1][1][RTW89_MKK][50] = 127, @@ -48405,6 +50488,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][50] = 127, [1][1][RTW89_CHILE][50] = 127, [1][1][RTW89_QATAR][50] = 127, + [1][1][RTW89_THAILAND][50] = 127, [1][1][RTW89_FCC][52] = 30, [1][1][RTW89_ETSI][52] = 127, [1][1][RTW89_MKK][52] = 127, @@ -48417,6 +50501,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][52] = 127, [1][1][RTW89_CHILE][52] = 127, [1][1][RTW89_QATAR][52] = 127, + [1][1][RTW89_THAILAND][52] = 127, [2][0][RTW89_FCC][0] = 68, [2][0][RTW89_ETSI][0] = 52, [2][0][RTW89_MKK][0] = 60, @@ -48429,6 +50514,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][0] = 46, [2][0][RTW89_CHILE][0] = 68, [2][0][RTW89_QATAR][0] = 52, + [2][0][RTW89_THAILAND][0] = 52, [2][0][RTW89_FCC][2] = 64, [2][0][RTW89_ETSI][2] = 52, [2][0][RTW89_MKK][2] = 60, @@ -48441,6 +50527,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][2] = 46, [2][0][RTW89_CHILE][2] = 64, [2][0][RTW89_QATAR][2] = 52, + [2][0][RTW89_THAILAND][2] = 52, [2][0][RTW89_FCC][4] = 68, [2][0][RTW89_ETSI][4] = 52, [2][0][RTW89_MKK][4] = 50, @@ -48453,6 +50540,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][4] = 46, [2][0][RTW89_CHILE][4] = 68, [2][0][RTW89_QATAR][4] = 52, + [2][0][RTW89_THAILAND][4] = 52, [2][0][RTW89_FCC][6] = 68, [2][0][RTW89_ETSI][6] = 52, [2][0][RTW89_MKK][6] = 50, @@ -48465,6 +50553,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][6] = 46, [2][0][RTW89_CHILE][6] = 68, [2][0][RTW89_QATAR][6] = 52, + [2][0][RTW89_THAILAND][6] = 52, [2][0][RTW89_FCC][8] = 68, [2][0][RTW89_ETSI][8] = 52, [2][0][RTW89_MKK][8] = 44, @@ -48477,6 +50566,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][8] = 46, [2][0][RTW89_CHILE][8] = 68, [2][0][RTW89_QATAR][8] = 52, + [2][0][RTW89_THAILAND][8] = 52, [2][0][RTW89_FCC][10] = 68, [2][0][RTW89_ETSI][10] = 52, [2][0][RTW89_MKK][10] = 44, @@ -48489,6 +50579,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][10] = 46, [2][0][RTW89_CHILE][10] = 68, [2][0][RTW89_QATAR][10] = 52, + [2][0][RTW89_THAILAND][10] = 52, [2][0][RTW89_FCC][12] = 68, [2][0][RTW89_ETSI][12] = 52, [2][0][RTW89_MKK][12] = 58, @@ -48501,6 +50592,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][12] = 46, [2][0][RTW89_CHILE][12] = 68, [2][0][RTW89_QATAR][12] = 52, + 
[2][0][RTW89_THAILAND][12] = 52, [2][0][RTW89_FCC][14] = 68, [2][0][RTW89_ETSI][14] = 52, [2][0][RTW89_MKK][14] = 58, @@ -48513,6 +50605,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][14] = 46, [2][0][RTW89_CHILE][14] = 68, [2][0][RTW89_QATAR][14] = 52, + [2][0][RTW89_THAILAND][14] = 52, [2][0][RTW89_FCC][15] = 68, [2][0][RTW89_ETSI][15] = 52, [2][0][RTW89_MKK][15] = 68, @@ -48525,6 +50618,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][15] = 46, [2][0][RTW89_CHILE][15] = 68, [2][0][RTW89_QATAR][15] = 52, + [2][0][RTW89_THAILAND][15] = 52, [2][0][RTW89_FCC][17] = 68, [2][0][RTW89_ETSI][17] = 52, [2][0][RTW89_MKK][17] = 74, @@ -48537,6 +50631,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][17] = 46, [2][0][RTW89_CHILE][17] = 68, [2][0][RTW89_QATAR][17] = 52, + [2][0][RTW89_THAILAND][17] = 52, [2][0][RTW89_FCC][19] = 70, [2][0][RTW89_ETSI][19] = 52, [2][0][RTW89_MKK][19] = 74, @@ -48549,6 +50644,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][19] = 46, [2][0][RTW89_CHILE][19] = 70, [2][0][RTW89_QATAR][19] = 52, + [2][0][RTW89_THAILAND][19] = 52, [2][0][RTW89_FCC][21] = 70, [2][0][RTW89_ETSI][21] = 52, [2][0][RTW89_MKK][21] = 74, @@ -48561,6 +50657,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][21] = 46, [2][0][RTW89_CHILE][21] = 70, [2][0][RTW89_QATAR][21] = 52, + [2][0][RTW89_THAILAND][21] = 52, [2][0][RTW89_FCC][23] = 70, [2][0][RTW89_ETSI][23] = 52, [2][0][RTW89_MKK][23] = 74, @@ -48573,6 +50670,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][23] = 46, [2][0][RTW89_CHILE][23] = 70, [2][0][RTW89_QATAR][23] = 52, + [2][0][RTW89_THAILAND][23] = 52, [2][0][RTW89_FCC][25] = 70, [2][0][RTW89_ETSI][25] = 52, [2][0][RTW89_MKK][25] = 74, @@ -48585,6 +50683,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][25] = 46, [2][0][RTW89_CHILE][25] = 70, [2][0][RTW89_QATAR][25] = 52, + [2][0][RTW89_THAILAND][25] = 52, [2][0][RTW89_FCC][27] = 70, [2][0][RTW89_ETSI][27] = 52, [2][0][RTW89_MKK][27] = 74, @@ -48597,6 +50696,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][27] = 46, [2][0][RTW89_CHILE][27] = 70, [2][0][RTW89_QATAR][27] = 52, + [2][0][RTW89_THAILAND][27] = 52, [2][0][RTW89_FCC][29] = 70, [2][0][RTW89_ETSI][29] = 52, [2][0][RTW89_MKK][29] = 74, @@ -48609,6 +50709,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][29] = 46, [2][0][RTW89_CHILE][29] = 70, [2][0][RTW89_QATAR][29] = 52, + [2][0][RTW89_THAILAND][29] = 52, [2][0][RTW89_FCC][31] = 70, [2][0][RTW89_ETSI][31] = 52, [2][0][RTW89_MKK][31] = 74, @@ -48621,6 +50722,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][31] = 46, [2][0][RTW89_CHILE][31] = 70, [2][0][RTW89_QATAR][31] = 52, + [2][0][RTW89_THAILAND][31] = 52, [2][0][RTW89_FCC][33] = 62, [2][0][RTW89_ETSI][33] = 52, [2][0][RTW89_MKK][33] = 74, @@ -48633,6 +50735,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][33] = 46, [2][0][RTW89_CHILE][33] = 62, [2][0][RTW89_QATAR][33] = 52, + [2][0][RTW89_THAILAND][33] = 52, [2][0][RTW89_FCC][35] = 62, [2][0][RTW89_ETSI][35] = 52, [2][0][RTW89_MKK][35] = 74, @@ -48645,6 +50748,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][0][RTW89_UKRAINE][35] = 46, [2][0][RTW89_CHILE][35] = 62, [2][0][RTW89_QATAR][35] = 52, + [2][0][RTW89_THAILAND][35] = 52, [2][0][RTW89_FCC][37] = 70, [2][0][RTW89_ETSI][37] = 127, [2][0][RTW89_MKK][37] = 74, @@ -48657,66 +50761,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][37] = 127, [2][0][RTW89_CHILE][37] = 70, [2][0][RTW89_QATAR][37] = 127, + [2][0][RTW89_THAILAND][37] = 127, [2][0][RTW89_FCC][38] = 82, [2][0][RTW89_ETSI][38] = 28, [2][0][RTW89_MKK][38] = 127, [2][0][RTW89_IC][38] = 82, [2][0][RTW89_KCC][38] = 60, [2][0][RTW89_ACMA][38] = 82, - [2][0][RTW89_CN][38] = 68, + [2][0][RTW89_CN][38] = 56, [2][0][RTW89_UK][38] = 54, [2][0][RTW89_MEXICO][38] = 82, [2][0][RTW89_UKRAINE][38] = 26, [2][0][RTW89_CHILE][38] = 82, [2][0][RTW89_QATAR][38] = 26, + [2][0][RTW89_THAILAND][38] = 28, [2][0][RTW89_FCC][40] = 82, [2][0][RTW89_ETSI][40] = 28, [2][0][RTW89_MKK][40] = 127, [2][0][RTW89_IC][40] = 82, [2][0][RTW89_KCC][40] = 60, [2][0][RTW89_ACMA][40] = 82, - [2][0][RTW89_CN][40] = 68, + [2][0][RTW89_CN][40] = 56, [2][0][RTW89_UK][40] = 54, [2][0][RTW89_MEXICO][40] = 82, [2][0][RTW89_UKRAINE][40] = 26, [2][0][RTW89_CHILE][40] = 82, [2][0][RTW89_QATAR][40] = 26, + [2][0][RTW89_THAILAND][40] = 28, [2][0][RTW89_FCC][42] = 76, [2][0][RTW89_ETSI][42] = 28, [2][0][RTW89_MKK][42] = 127, [2][0][RTW89_IC][42] = 76, [2][0][RTW89_KCC][42] = 60, [2][0][RTW89_ACMA][42] = 76, - [2][0][RTW89_CN][42] = 68, + [2][0][RTW89_CN][42] = 56, [2][0][RTW89_UK][42] = 54, [2][0][RTW89_MEXICO][42] = 76, [2][0][RTW89_UKRAINE][42] = 26, [2][0][RTW89_CHILE][42] = 76, [2][0][RTW89_QATAR][42] = 26, + [2][0][RTW89_THAILAND][42] = 28, [2][0][RTW89_FCC][44] = 80, [2][0][RTW89_ETSI][44] = 28, [2][0][RTW89_MKK][44] = 127, [2][0][RTW89_IC][44] = 80, [2][0][RTW89_KCC][44] = 60, [2][0][RTW89_ACMA][44] = 80, - [2][0][RTW89_CN][44] = 68, + [2][0][RTW89_CN][44] = 56, [2][0][RTW89_UK][44] = 54, [2][0][RTW89_MEXICO][44] = 80, [2][0][RTW89_UKRAINE][44] = 26, [2][0][RTW89_CHILE][44] = 80, [2][0][RTW89_QATAR][44] = 26, + [2][0][RTW89_THAILAND][44] = 28, [2][0][RTW89_FCC][46] = 80, [2][0][RTW89_ETSI][46] = 28, [2][0][RTW89_MKK][46] = 127, [2][0][RTW89_IC][46] = 80, [2][0][RTW89_KCC][46] = 60, [2][0][RTW89_ACMA][46] = 80, - [2][0][RTW89_CN][46] = 68, + [2][0][RTW89_CN][46] = 56, [2][0][RTW89_UK][46] = 54, [2][0][RTW89_MEXICO][46] = 80, [2][0][RTW89_UKRAINE][46] = 26, [2][0][RTW89_CHILE][46] = 80, [2][0][RTW89_QATAR][46] = 26, + [2][0][RTW89_THAILAND][46] = 28, [2][0][RTW89_FCC][48] = 64, [2][0][RTW89_ETSI][48] = 127, [2][0][RTW89_MKK][48] = 127, @@ -48729,6 +50839,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][48] = 127, [2][0][RTW89_CHILE][48] = 127, [2][0][RTW89_QATAR][48] = 127, + [2][0][RTW89_THAILAND][48] = 127, [2][0][RTW89_FCC][50] = 64, [2][0][RTW89_ETSI][50] = 127, [2][0][RTW89_MKK][50] = 127, @@ -48741,6 +50852,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][50] = 127, [2][0][RTW89_CHILE][50] = 127, [2][0][RTW89_QATAR][50] = 127, + [2][0][RTW89_THAILAND][50] = 127, [2][0][RTW89_FCC][52] = 64, [2][0][RTW89_ETSI][52] = 127, [2][0][RTW89_MKK][52] = 127, @@ -48753,6 +50865,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][52] = 127, [2][0][RTW89_CHILE][52] = 127, [2][0][RTW89_QATAR][52] = 127, + [2][0][RTW89_THAILAND][52] = 127, [2][1][RTW89_FCC][0] = 50, [2][1][RTW89_ETSI][0] = 40, [2][1][RTW89_MKK][0] = 44, @@ -48765,6 +50878,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][0] = 34, [2][1][RTW89_CHILE][0] = 50, [2][1][RTW89_QATAR][0] = 40, + [2][1][RTW89_THAILAND][0] = 40, [2][1][RTW89_FCC][2] = 50, [2][1][RTW89_ETSI][2] = 40, [2][1][RTW89_MKK][2] = 44, @@ -48777,6 +50891,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][2] = 34, [2][1][RTW89_CHILE][2] = 50, [2][1][RTW89_QATAR][2] = 40, + [2][1][RTW89_THAILAND][2] = 40, [2][1][RTW89_FCC][4] = 50, [2][1][RTW89_ETSI][4] = 40, [2][1][RTW89_MKK][4] = 36, @@ -48789,6 +50904,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][4] = 34, [2][1][RTW89_CHILE][4] = 50, [2][1][RTW89_QATAR][4] = 40, + [2][1][RTW89_THAILAND][4] = 40, [2][1][RTW89_FCC][6] = 50, [2][1][RTW89_ETSI][6] = 40, [2][1][RTW89_MKK][6] = 36, @@ -48801,6 +50917,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][6] = 34, [2][1][RTW89_CHILE][6] = 50, [2][1][RTW89_QATAR][6] = 40, + [2][1][RTW89_THAILAND][6] = 40, [2][1][RTW89_FCC][8] = 50, [2][1][RTW89_ETSI][8] = 40, [2][1][RTW89_MKK][8] = 32, @@ -48813,6 +50930,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][8] = 34, [2][1][RTW89_CHILE][8] = 50, [2][1][RTW89_QATAR][8] = 40, + [2][1][RTW89_THAILAND][8] = 40, [2][1][RTW89_FCC][10] = 50, [2][1][RTW89_ETSI][10] = 40, [2][1][RTW89_MKK][10] = 32, @@ -48825,6 +50943,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][10] = 34, [2][1][RTW89_CHILE][10] = 50, [2][1][RTW89_QATAR][10] = 40, + [2][1][RTW89_THAILAND][10] = 40, [2][1][RTW89_FCC][12] = 48, [2][1][RTW89_ETSI][12] = 40, [2][1][RTW89_MKK][12] = 44, @@ -48837,6 +50956,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][12] = 34, [2][1][RTW89_CHILE][12] = 48, [2][1][RTW89_QATAR][12] = 40, + [2][1][RTW89_THAILAND][12] = 40, [2][1][RTW89_FCC][14] = 48, [2][1][RTW89_ETSI][14] = 40, [2][1][RTW89_MKK][14] = 44, @@ -48849,6 +50969,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][14] = 34, [2][1][RTW89_CHILE][14] = 48, [2][1][RTW89_QATAR][14] = 40, + [2][1][RTW89_THAILAND][14] = 40, [2][1][RTW89_FCC][15] = 50, [2][1][RTW89_ETSI][15] = 40, [2][1][RTW89_MKK][15] = 66, @@ -48861,6 +50982,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][15] = 34, [2][1][RTW89_CHILE][15] = 50, [2][1][RTW89_QATAR][15] = 40, + [2][1][RTW89_THAILAND][15] = 40, [2][1][RTW89_FCC][17] = 50, [2][1][RTW89_ETSI][17] = 40, [2][1][RTW89_MKK][17] = 66, @@ -48873,6 +50995,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][17] = 34, [2][1][RTW89_CHILE][17] = 50, [2][1][RTW89_QATAR][17] = 40, + [2][1][RTW89_THAILAND][17] = 40, [2][1][RTW89_FCC][19] = 50, [2][1][RTW89_ETSI][19] = 40, [2][1][RTW89_MKK][19] = 66, @@ -48885,6 +51008,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][19] = 34, [2][1][RTW89_CHILE][19] = 50, [2][1][RTW89_QATAR][19] = 40, + [2][1][RTW89_THAILAND][19] = 40, [2][1][RTW89_FCC][21] = 50, [2][1][RTW89_ETSI][21] = 40, [2][1][RTW89_MKK][21] = 66, @@ -48897,6 +51021,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][21] = 34, [2][1][RTW89_CHILE][21] = 50, [2][1][RTW89_QATAR][21] = 40, + [2][1][RTW89_THAILAND][21] = 40, [2][1][RTW89_FCC][23] = 50, [2][1][RTW89_ETSI][23] = 40, [2][1][RTW89_MKK][23] = 
66, @@ -48909,6 +51034,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][23] = 34, [2][1][RTW89_CHILE][23] = 50, [2][1][RTW89_QATAR][23] = 40, + [2][1][RTW89_THAILAND][23] = 40, [2][1][RTW89_FCC][25] = 50, [2][1][RTW89_ETSI][25] = 40, [2][1][RTW89_MKK][25] = 66, @@ -48921,6 +51047,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][25] = 34, [2][1][RTW89_CHILE][25] = 50, [2][1][RTW89_QATAR][25] = 40, + [2][1][RTW89_THAILAND][25] = 40, [2][1][RTW89_FCC][27] = 50, [2][1][RTW89_ETSI][27] = 40, [2][1][RTW89_MKK][27] = 66, @@ -48933,6 +51060,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][27] = 34, [2][1][RTW89_CHILE][27] = 50, [2][1][RTW89_QATAR][27] = 40, + [2][1][RTW89_THAILAND][27] = 40, [2][1][RTW89_FCC][29] = 50, [2][1][RTW89_ETSI][29] = 40, [2][1][RTW89_MKK][29] = 66, @@ -48945,6 +51073,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][29] = 34, [2][1][RTW89_CHILE][29] = 50, [2][1][RTW89_QATAR][29] = 40, + [2][1][RTW89_THAILAND][29] = 40, [2][1][RTW89_FCC][31] = 50, [2][1][RTW89_ETSI][31] = 40, [2][1][RTW89_MKK][31] = 66, @@ -48957,6 +51086,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][31] = 34, [2][1][RTW89_CHILE][31] = 50, [2][1][RTW89_QATAR][31] = 40, + [2][1][RTW89_THAILAND][31] = 40, [2][1][RTW89_FCC][33] = 48, [2][1][RTW89_ETSI][33] = 40, [2][1][RTW89_MKK][33] = 66, @@ -48969,6 +51099,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][33] = 34, [2][1][RTW89_CHILE][33] = 48, [2][1][RTW89_QATAR][33] = 40, + [2][1][RTW89_THAILAND][33] = 40, [2][1][RTW89_FCC][35] = 48, [2][1][RTW89_ETSI][35] = 40, [2][1][RTW89_MKK][35] = 66, @@ -48981,6 +51112,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][35] = 34, [2][1][RTW89_CHILE][35] = 48, [2][1][RTW89_QATAR][35] = 40, + [2][1][RTW89_THAILAND][35] = 40, [2][1][RTW89_FCC][37] = 52, [2][1][RTW89_ETSI][37] = 127, [2][1][RTW89_MKK][37] = 66, @@ -48993,6 +51125,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][37] = 127, [2][1][RTW89_CHILE][37] = 52, [2][1][RTW89_QATAR][37] = 127, + [2][1][RTW89_THAILAND][37] = 127, [2][1][RTW89_FCC][38] = 78, [2][1][RTW89_ETSI][38] = 16, [2][1][RTW89_MKK][38] = 127, @@ -49005,6 +51138,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][38] = 14, [2][1][RTW89_CHILE][38] = 72, [2][1][RTW89_QATAR][38] = 14, + [2][1][RTW89_THAILAND][38] = 16, [2][1][RTW89_FCC][40] = 78, [2][1][RTW89_ETSI][40] = 16, [2][1][RTW89_MKK][40] = 127, @@ -49017,6 +51151,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][40] = 14, [2][1][RTW89_CHILE][40] = 72, [2][1][RTW89_QATAR][40] = 14, + [2][1][RTW89_THAILAND][40] = 16, [2][1][RTW89_FCC][42] = 78, [2][1][RTW89_ETSI][42] = 16, [2][1][RTW89_MKK][42] = 127, @@ -49029,6 +51164,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][42] = 14, [2][1][RTW89_CHILE][42] = 72, [2][1][RTW89_QATAR][42] = 14, + [2][1][RTW89_THAILAND][42] = 16, [2][1][RTW89_FCC][44] = 74, [2][1][RTW89_ETSI][44] = 16, [2][1][RTW89_MKK][44] = 127, @@ -49041,6 +51177,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][44] = 14, [2][1][RTW89_CHILE][44] = 72, [2][1][RTW89_QATAR][44] = 14, + [2][1][RTW89_THAILAND][44] = 16, 
[2][1][RTW89_FCC][46] = 74, [2][1][RTW89_ETSI][46] = 16, [2][1][RTW89_MKK][46] = 127, @@ -49053,6 +51190,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][46] = 14, [2][1][RTW89_CHILE][46] = 72, [2][1][RTW89_QATAR][46] = 14, + [2][1][RTW89_THAILAND][46] = 16, [2][1][RTW89_FCC][48] = 40, [2][1][RTW89_ETSI][48] = 127, [2][1][RTW89_MKK][48] = 127, @@ -49065,6 +51203,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][48] = 127, [2][1][RTW89_CHILE][48] = 127, [2][1][RTW89_QATAR][48] = 127, + [2][1][RTW89_THAILAND][48] = 127, [2][1][RTW89_FCC][50] = 40, [2][1][RTW89_ETSI][50] = 127, [2][1][RTW89_MKK][50] = 127, @@ -49077,6 +51216,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][50] = 127, [2][1][RTW89_CHILE][50] = 127, [2][1][RTW89_QATAR][50] = 127, + [2][1][RTW89_THAILAND][50] = 127, [2][1][RTW89_FCC][52] = 40, [2][1][RTW89_ETSI][52] = 127, [2][1][RTW89_MKK][52] = 127, @@ -49089,6 +51229,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][52] = 127, [2][1][RTW89_CHILE][52] = 127, [2][1][RTW89_QATAR][52] = 127, + [2][1][RTW89_THAILAND][52] = 127, }; static @@ -49169,19 +51310,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][2][44] = 56, [0][0][RTW89_WW][0][45] = -16, [0][0][RTW89_WW][1][45] = -16, - [0][0][RTW89_WW][2][45] = 0, + [0][0][RTW89_WW][2][45] = 56, [0][0][RTW89_WW][0][47] = -18, [0][0][RTW89_WW][1][47] = -18, - [0][0][RTW89_WW][2][47] = 0, + [0][0][RTW89_WW][2][47] = 56, [0][0][RTW89_WW][0][49] = -18, [0][0][RTW89_WW][1][49] = -18, - [0][0][RTW89_WW][2][49] = 0, + [0][0][RTW89_WW][2][49] = 56, [0][0][RTW89_WW][0][51] = -18, [0][0][RTW89_WW][1][51] = -18, - [0][0][RTW89_WW][2][51] = 0, + [0][0][RTW89_WW][2][51] = 56, [0][0][RTW89_WW][0][53] = -16, [0][0][RTW89_WW][1][53] = -16, - [0][0][RTW89_WW][2][53] = 0, + [0][0][RTW89_WW][2][53] = 56, [0][0][RTW89_WW][0][55] = -18, [0][0][RTW89_WW][1][55] = -18, [0][0][RTW89_WW][2][55] = 56, @@ -49361,19 +51502,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][2][44] = 32, [0][1][RTW89_WW][0][45] = -40, [0][1][RTW89_WW][1][45] = -40, - [0][1][RTW89_WW][2][45] = 0, + [0][1][RTW89_WW][2][45] = 32, [0][1][RTW89_WW][0][47] = -40, [0][1][RTW89_WW][1][47] = -40, - [0][1][RTW89_WW][2][47] = 0, + [0][1][RTW89_WW][2][47] = 32, [0][1][RTW89_WW][0][49] = -40, [0][1][RTW89_WW][1][49] = -40, - [0][1][RTW89_WW][2][49] = 0, + [0][1][RTW89_WW][2][49] = 32, [0][1][RTW89_WW][0][51] = -40, [0][1][RTW89_WW][1][51] = -40, - [0][1][RTW89_WW][2][51] = 0, + [0][1][RTW89_WW][2][51] = 32, [0][1][RTW89_WW][0][53] = -40, [0][1][RTW89_WW][1][53] = -40, - [0][1][RTW89_WW][2][53] = 0, + [0][1][RTW89_WW][2][53] = 32, [0][1][RTW89_WW][0][55] = -40, [0][1][RTW89_WW][1][55] = -40, [0][1][RTW89_WW][2][55] = 30, @@ -49553,19 +51694,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][2][44] = 66, [1][0][RTW89_WW][0][45] = -4, [1][0][RTW89_WW][1][45] = -4, - [1][0][RTW89_WW][2][45] = 0, + [1][0][RTW89_WW][2][45] = 68, [1][0][RTW89_WW][0][47] = -4, [1][0][RTW89_WW][1][47] = -4, - [1][0][RTW89_WW][2][47] = 0, + [1][0][RTW89_WW][2][47] = 68, [1][0][RTW89_WW][0][49] = -4, [1][0][RTW89_WW][1][49] = -4, - [1][0][RTW89_WW][2][49] = 0, + [1][0][RTW89_WW][2][49] = 68, [1][0][RTW89_WW][0][51] = -4, [1][0][RTW89_WW][1][51] = -4, - [1][0][RTW89_WW][2][51] = 0, + [1][0][RTW89_WW][2][51] = 68, [1][0][RTW89_WW][0][53] = 
-4, [1][0][RTW89_WW][1][53] = -4, - [1][0][RTW89_WW][2][53] = 0, + [1][0][RTW89_WW][2][53] = 68, [1][0][RTW89_WW][0][55] = -4, [1][0][RTW89_WW][1][55] = -4, [1][0][RTW89_WW][2][55] = 68, @@ -49745,19 +51886,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][2][44] = 44, [1][1][RTW89_WW][0][45] = -26, [1][1][RTW89_WW][1][45] = -26, - [1][1][RTW89_WW][2][45] = 0, + [1][1][RTW89_WW][2][45] = 44, [1][1][RTW89_WW][0][47] = -28, [1][1][RTW89_WW][1][47] = -28, - [1][1][RTW89_WW][2][47] = 0, + [1][1][RTW89_WW][2][47] = 44, [1][1][RTW89_WW][0][49] = -28, [1][1][RTW89_WW][1][49] = -28, - [1][1][RTW89_WW][2][49] = 0, + [1][1][RTW89_WW][2][49] = 44, [1][1][RTW89_WW][0][51] = -28, [1][1][RTW89_WW][1][51] = -28, - [1][1][RTW89_WW][2][51] = 0, + [1][1][RTW89_WW][2][51] = 44, [1][1][RTW89_WW][0][53] = -26, [1][1][RTW89_WW][1][53] = -26, - [1][1][RTW89_WW][2][53] = 0, + [1][1][RTW89_WW][2][53] = 44, [1][1][RTW89_WW][0][55] = -28, [1][1][RTW89_WW][1][55] = -28, [1][1][RTW89_WW][2][55] = 44, @@ -49901,106 +52042,106 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][2][21] = 60, [2][0][RTW89_WW][0][23] = -2, [2][0][RTW89_WW][1][23] = -2, - [2][0][RTW89_WW][2][23] = 78, + [2][0][RTW89_WW][2][23] = 70, [2][0][RTW89_WW][0][25] = -2, [2][0][RTW89_WW][1][25] = -2, - [2][0][RTW89_WW][2][25] = 78, + [2][0][RTW89_WW][2][25] = 70, [2][0][RTW89_WW][0][27] = -2, [2][0][RTW89_WW][1][27] = -2, - [2][0][RTW89_WW][2][27] = 78, + [2][0][RTW89_WW][2][27] = 70, [2][0][RTW89_WW][0][29] = -2, [2][0][RTW89_WW][1][29] = -2, - [2][0][RTW89_WW][2][29] = 78, + [2][0][RTW89_WW][2][29] = 70, [2][0][RTW89_WW][0][30] = -2, [2][0][RTW89_WW][1][30] = -2, - [2][0][RTW89_WW][2][30] = 78, + [2][0][RTW89_WW][2][30] = 70, [2][0][RTW89_WW][0][32] = -2, [2][0][RTW89_WW][1][32] = -2, - [2][0][RTW89_WW][2][32] = 78, + [2][0][RTW89_WW][2][32] = 70, [2][0][RTW89_WW][0][34] = -2, [2][0][RTW89_WW][1][34] = -2, - [2][0][RTW89_WW][2][34] = 78, + [2][0][RTW89_WW][2][34] = 70, [2][0][RTW89_WW][0][36] = -2, [2][0][RTW89_WW][1][36] = -2, - [2][0][RTW89_WW][2][36] = 78, + [2][0][RTW89_WW][2][36] = 70, [2][0][RTW89_WW][0][38] = -2, [2][0][RTW89_WW][1][38] = -2, - [2][0][RTW89_WW][2][38] = 78, + [2][0][RTW89_WW][2][38] = 70, [2][0][RTW89_WW][0][40] = -2, [2][0][RTW89_WW][1][40] = -2, - [2][0][RTW89_WW][2][40] = 78, + [2][0][RTW89_WW][2][40] = 70, [2][0][RTW89_WW][0][42] = -2, [2][0][RTW89_WW][1][42] = -2, - [2][0][RTW89_WW][2][42] = 78, + [2][0][RTW89_WW][2][42] = 70, [2][0][RTW89_WW][0][44] = -2, [2][0][RTW89_WW][1][44] = -2, - [2][0][RTW89_WW][2][44] = 78, + [2][0][RTW89_WW][2][44] = 70, [2][0][RTW89_WW][0][45] = -2, [2][0][RTW89_WW][1][45] = -2, - [2][0][RTW89_WW][2][45] = 0, + [2][0][RTW89_WW][2][45] = 70, [2][0][RTW89_WW][0][47] = -2, [2][0][RTW89_WW][1][47] = -2, - [2][0][RTW89_WW][2][47] = 0, + [2][0][RTW89_WW][2][47] = 70, [2][0][RTW89_WW][0][49] = -2, [2][0][RTW89_WW][1][49] = -2, - [2][0][RTW89_WW][2][49] = 0, + [2][0][RTW89_WW][2][49] = 70, [2][0][RTW89_WW][0][51] = -2, [2][0][RTW89_WW][1][51] = -2, - [2][0][RTW89_WW][2][51] = 0, + [2][0][RTW89_WW][2][51] = 70, [2][0][RTW89_WW][0][53] = -2, [2][0][RTW89_WW][1][53] = -2, - [2][0][RTW89_WW][2][53] = 0, + [2][0][RTW89_WW][2][53] = 70, [2][0][RTW89_WW][0][55] = -2, [2][0][RTW89_WW][1][55] = -2, - [2][0][RTW89_WW][2][55] = 78, + [2][0][RTW89_WW][2][55] = 68, [2][0][RTW89_WW][0][57] = -2, [2][0][RTW89_WW][1][57] = -2, - [2][0][RTW89_WW][2][57] = 78, + [2][0][RTW89_WW][2][57] = 68, [2][0][RTW89_WW][0][59] = -2, [2][0][RTW89_WW][1][59] 
= -2, - [2][0][RTW89_WW][2][59] = 78, + [2][0][RTW89_WW][2][59] = 68, [2][0][RTW89_WW][0][60] = -2, [2][0][RTW89_WW][1][60] = -2, - [2][0][RTW89_WW][2][60] = 78, + [2][0][RTW89_WW][2][60] = 68, [2][0][RTW89_WW][0][62] = -2, [2][0][RTW89_WW][1][62] = -2, - [2][0][RTW89_WW][2][62] = 78, + [2][0][RTW89_WW][2][62] = 68, [2][0][RTW89_WW][0][64] = -2, [2][0][RTW89_WW][1][64] = -2, - [2][0][RTW89_WW][2][64] = 78, + [2][0][RTW89_WW][2][64] = 68, [2][0][RTW89_WW][0][66] = -2, [2][0][RTW89_WW][1][66] = -2, - [2][0][RTW89_WW][2][66] = 78, + [2][0][RTW89_WW][2][66] = 68, [2][0][RTW89_WW][0][68] = -2, [2][0][RTW89_WW][1][68] = -2, - [2][0][RTW89_WW][2][68] = 78, + [2][0][RTW89_WW][2][68] = 68, [2][0][RTW89_WW][0][70] = -2, [2][0][RTW89_WW][1][70] = -2, - [2][0][RTW89_WW][2][70] = 78, + [2][0][RTW89_WW][2][70] = 68, [2][0][RTW89_WW][0][72] = -2, [2][0][RTW89_WW][1][72] = -2, - [2][0][RTW89_WW][2][72] = 78, + [2][0][RTW89_WW][2][72] = 68, [2][0][RTW89_WW][0][74] = -2, [2][0][RTW89_WW][1][74] = -2, - [2][0][RTW89_WW][2][74] = 78, + [2][0][RTW89_WW][2][74] = 68, [2][0][RTW89_WW][0][75] = -2, [2][0][RTW89_WW][1][75] = -2, - [2][0][RTW89_WW][2][75] = 78, + [2][0][RTW89_WW][2][75] = 68, [2][0][RTW89_WW][0][77] = -2, [2][0][RTW89_WW][1][77] = -2, - [2][0][RTW89_WW][2][77] = 78, + [2][0][RTW89_WW][2][77] = 68, [2][0][RTW89_WW][0][79] = -2, [2][0][RTW89_WW][1][79] = -2, - [2][0][RTW89_WW][2][79] = 78, + [2][0][RTW89_WW][2][79] = 68, [2][0][RTW89_WW][0][81] = -2, [2][0][RTW89_WW][1][81] = -2, - [2][0][RTW89_WW][2][81] = 78, + [2][0][RTW89_WW][2][81] = 68, [2][0][RTW89_WW][0][83] = -2, [2][0][RTW89_WW][1][83] = -2, - [2][0][RTW89_WW][2][83] = 78, + [2][0][RTW89_WW][2][83] = 68, [2][0][RTW89_WW][0][85] = -2, [2][0][RTW89_WW][1][85] = -2, - [2][0][RTW89_WW][2][85] = 78, + [2][0][RTW89_WW][2][85] = 68, [2][0][RTW89_WW][0][87] = -2, [2][0][RTW89_WW][1][87] = -2, [2][0][RTW89_WW][2][87] = 0, @@ -50129,19 +52270,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][2][44] = 54, [2][1][RTW89_WW][0][45] = -16, [2][1][RTW89_WW][1][45] = -16, - [2][1][RTW89_WW][2][45] = 0, + [2][1][RTW89_WW][2][45] = 56, [2][1][RTW89_WW][0][47] = -16, [2][1][RTW89_WW][1][47] = -16, - [2][1][RTW89_WW][2][47] = 0, + [2][1][RTW89_WW][2][47] = 56, [2][1][RTW89_WW][0][49] = -16, [2][1][RTW89_WW][1][49] = -16, - [2][1][RTW89_WW][2][49] = 0, + [2][1][RTW89_WW][2][49] = 56, [2][1][RTW89_WW][0][51] = -16, [2][1][RTW89_WW][1][51] = -16, - [2][1][RTW89_WW][2][51] = 0, + [2][1][RTW89_WW][2][51] = 56, [2][1][RTW89_WW][0][53] = -16, [2][1][RTW89_WW][1][53] = -16, - [2][1][RTW89_WW][2][53] = 0, + [2][1][RTW89_WW][2][53] = 56, [2][1][RTW89_WW][0][55] = -16, [2][1][RTW89_WW][1][55] = -16, [2][1][RTW89_WW][2][55] = 54, @@ -50254,6 +52395,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][0] = 30, [0][0][RTW89_MKK][0][0] = -8, [0][0][RTW89_IC][1][0] = -16, + [0][0][RTW89_IC][2][0] = 44, [0][0][RTW89_KCC][1][0] = -2, [0][0][RTW89_KCC][0][0] = -2, [0][0][RTW89_ACMA][1][0] = 32, @@ -50263,6 +52405,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][0] = -8, [0][0][RTW89_UK][1][0] = 32, [0][0][RTW89_UK][0][0] = -8, + [0][0][RTW89_THAILAND][1][0] = 30, + [0][0][RTW89_THAILAND][0][0] = -16, [0][0][RTW89_FCC][1][2] = -18, [0][0][RTW89_FCC][2][2] = 44, [0][0][RTW89_ETSI][1][2] = 32, @@ -50270,6 +52414,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][2] = 30, [0][0][RTW89_MKK][0][2] = -8, 
[0][0][RTW89_IC][1][2] = -18, + [0][0][RTW89_IC][2][2] = 44, [0][0][RTW89_KCC][1][2] = -2, [0][0][RTW89_KCC][0][2] = -2, [0][0][RTW89_ACMA][1][2] = 32, @@ -50279,6 +52424,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][2] = -8, [0][0][RTW89_UK][1][2] = 32, [0][0][RTW89_UK][0][2] = -8, + [0][0][RTW89_THAILAND][1][2] = 30, + [0][0][RTW89_THAILAND][0][2] = -18, [0][0][RTW89_FCC][1][4] = -18, [0][0][RTW89_FCC][2][4] = 44, [0][0][RTW89_ETSI][1][4] = 32, @@ -50286,6 +52433,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][4] = 30, [0][0][RTW89_MKK][0][4] = -8, [0][0][RTW89_IC][1][4] = -18, + [0][0][RTW89_IC][2][4] = 44, [0][0][RTW89_KCC][1][4] = -2, [0][0][RTW89_KCC][0][4] = -2, [0][0][RTW89_ACMA][1][4] = 32, @@ -50295,6 +52443,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][4] = -8, [0][0][RTW89_UK][1][4] = 32, [0][0][RTW89_UK][0][4] = -8, + [0][0][RTW89_THAILAND][1][4] = 30, + [0][0][RTW89_THAILAND][0][4] = -18, [0][0][RTW89_FCC][1][6] = -18, [0][0][RTW89_FCC][2][6] = 44, [0][0][RTW89_ETSI][1][6] = 32, @@ -50302,6 +52452,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][6] = 30, [0][0][RTW89_MKK][0][6] = -8, [0][0][RTW89_IC][1][6] = -18, + [0][0][RTW89_IC][2][6] = 44, [0][0][RTW89_KCC][1][6] = -2, [0][0][RTW89_KCC][0][6] = -2, [0][0][RTW89_ACMA][1][6] = 32, @@ -50311,6 +52462,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][6] = -8, [0][0][RTW89_UK][1][6] = 32, [0][0][RTW89_UK][0][6] = -8, + [0][0][RTW89_THAILAND][1][6] = 30, + [0][0][RTW89_THAILAND][0][6] = -18, [0][0][RTW89_FCC][1][8] = -18, [0][0][RTW89_FCC][2][8] = 44, [0][0][RTW89_ETSI][1][8] = 32, @@ -50318,6 +52471,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][8] = 30, [0][0][RTW89_MKK][0][8] = -8, [0][0][RTW89_IC][1][8] = -18, + [0][0][RTW89_IC][2][8] = 44, [0][0][RTW89_KCC][1][8] = -2, [0][0][RTW89_KCC][0][8] = -2, [0][0][RTW89_ACMA][1][8] = 32, @@ -50327,6 +52481,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][8] = -8, [0][0][RTW89_UK][1][8] = 32, [0][0][RTW89_UK][0][8] = -8, + [0][0][RTW89_THAILAND][1][8] = 30, + [0][0][RTW89_THAILAND][0][8] = -18, [0][0][RTW89_FCC][1][10] = -18, [0][0][RTW89_FCC][2][10] = 44, [0][0][RTW89_ETSI][1][10] = 32, @@ -50334,6 +52490,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][10] = 30, [0][0][RTW89_MKK][0][10] = -8, [0][0][RTW89_IC][1][10] = -18, + [0][0][RTW89_IC][2][10] = 44, [0][0][RTW89_KCC][1][10] = -2, [0][0][RTW89_KCC][0][10] = -2, [0][0][RTW89_ACMA][1][10] = 32, @@ -50343,6 +52500,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][10] = -8, [0][0][RTW89_UK][1][10] = 32, [0][0][RTW89_UK][0][10] = -8, + [0][0][RTW89_THAILAND][1][10] = 30, + [0][0][RTW89_THAILAND][0][10] = -18, [0][0][RTW89_FCC][1][12] = -18, [0][0][RTW89_FCC][2][12] = 44, [0][0][RTW89_ETSI][1][12] = 32, @@ -50350,6 +52509,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][12] = 30, [0][0][RTW89_MKK][0][12] = -8, [0][0][RTW89_IC][1][12] = -18, + [0][0][RTW89_IC][2][12] = 44, [0][0][RTW89_KCC][1][12] = -2, [0][0][RTW89_KCC][0][12] = -2, [0][0][RTW89_ACMA][1][12] = 32, @@ -50359,6 +52519,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][12] = -8, 
[0][0][RTW89_UK][1][12] = 32, [0][0][RTW89_UK][0][12] = -8, + [0][0][RTW89_THAILAND][1][12] = 30, + [0][0][RTW89_THAILAND][0][12] = -18, [0][0][RTW89_FCC][1][14] = -18, [0][0][RTW89_FCC][2][14] = 44, [0][0][RTW89_ETSI][1][14] = 32, @@ -50366,6 +52528,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][14] = 30, [0][0][RTW89_MKK][0][14] = -8, [0][0][RTW89_IC][1][14] = -18, + [0][0][RTW89_IC][2][14] = 44, [0][0][RTW89_KCC][1][14] = -2, [0][0][RTW89_KCC][0][14] = -2, [0][0][RTW89_ACMA][1][14] = 32, @@ -50375,6 +52538,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][14] = -8, [0][0][RTW89_UK][1][14] = 32, [0][0][RTW89_UK][0][14] = -8, + [0][0][RTW89_THAILAND][1][14] = 30, + [0][0][RTW89_THAILAND][0][14] = -18, [0][0][RTW89_FCC][1][15] = -18, [0][0][RTW89_FCC][2][15] = 44, [0][0][RTW89_ETSI][1][15] = 32, @@ -50382,6 +52547,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][15] = 30, [0][0][RTW89_MKK][0][15] = -8, [0][0][RTW89_IC][1][15] = -18, + [0][0][RTW89_IC][2][15] = 44, [0][0][RTW89_KCC][1][15] = -2, [0][0][RTW89_KCC][0][15] = -2, [0][0][RTW89_ACMA][1][15] = 32, @@ -50391,6 +52557,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][15] = -8, [0][0][RTW89_UK][1][15] = 32, [0][0][RTW89_UK][0][15] = -8, + [0][0][RTW89_THAILAND][1][15] = 30, + [0][0][RTW89_THAILAND][0][15] = -18, [0][0][RTW89_FCC][1][17] = -18, [0][0][RTW89_FCC][2][17] = 44, [0][0][RTW89_ETSI][1][17] = 32, @@ -50398,6 +52566,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][17] = 30, [0][0][RTW89_MKK][0][17] = -8, [0][0][RTW89_IC][1][17] = -18, + [0][0][RTW89_IC][2][17] = 44, [0][0][RTW89_KCC][1][17] = -2, [0][0][RTW89_KCC][0][17] = -2, [0][0][RTW89_ACMA][1][17] = 32, @@ -50407,6 +52576,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][17] = -8, [0][0][RTW89_UK][1][17] = 32, [0][0][RTW89_UK][0][17] = -8, + [0][0][RTW89_THAILAND][1][17] = 30, + [0][0][RTW89_THAILAND][0][17] = -18, [0][0][RTW89_FCC][1][19] = -18, [0][0][RTW89_FCC][2][19] = 44, [0][0][RTW89_ETSI][1][19] = 32, @@ -50414,6 +52585,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][19] = 30, [0][0][RTW89_MKK][0][19] = -8, [0][0][RTW89_IC][1][19] = -18, + [0][0][RTW89_IC][2][19] = 44, [0][0][RTW89_KCC][1][19] = -2, [0][0][RTW89_KCC][0][19] = -2, [0][0][RTW89_ACMA][1][19] = 32, @@ -50423,6 +52595,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][19] = -8, [0][0][RTW89_UK][1][19] = 32, [0][0][RTW89_UK][0][19] = -8, + [0][0][RTW89_THAILAND][1][19] = 30, + [0][0][RTW89_THAILAND][0][19] = -18, [0][0][RTW89_FCC][1][21] = -18, [0][0][RTW89_FCC][2][21] = 44, [0][0][RTW89_ETSI][1][21] = 32, @@ -50430,6 +52604,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][21] = 30, [0][0][RTW89_MKK][0][21] = -8, [0][0][RTW89_IC][1][21] = -18, + [0][0][RTW89_IC][2][21] = 44, [0][0][RTW89_KCC][1][21] = -2, [0][0][RTW89_KCC][0][21] = -2, [0][0][RTW89_ACMA][1][21] = 32, @@ -50439,6 +52614,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][21] = -8, [0][0][RTW89_UK][1][21] = 32, [0][0][RTW89_UK][0][21] = -8, + [0][0][RTW89_THAILAND][1][21] = 30, + [0][0][RTW89_THAILAND][0][21] = -18, [0][0][RTW89_FCC][1][23] = -18, [0][0][RTW89_FCC][2][23] = 54, [0][0][RTW89_ETSI][1][23] = 32, @@ 
-50446,6 +52623,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][23] = 30, [0][0][RTW89_MKK][0][23] = -8, [0][0][RTW89_IC][1][23] = -18, + [0][0][RTW89_IC][2][23] = 54, [0][0][RTW89_KCC][1][23] = -2, [0][0][RTW89_KCC][0][23] = -2, [0][0][RTW89_ACMA][1][23] = 32, @@ -50455,6 +52633,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][23] = -8, [0][0][RTW89_UK][1][23] = 32, [0][0][RTW89_UK][0][23] = -8, + [0][0][RTW89_THAILAND][1][23] = 30, + [0][0][RTW89_THAILAND][0][23] = -18, [0][0][RTW89_FCC][1][25] = -18, [0][0][RTW89_FCC][2][25] = 54, [0][0][RTW89_ETSI][1][25] = 32, @@ -50462,6 +52642,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][25] = 30, [0][0][RTW89_MKK][0][25] = -8, [0][0][RTW89_IC][1][25] = -18, + [0][0][RTW89_IC][2][25] = 54, [0][0][RTW89_KCC][1][25] = -2, [0][0][RTW89_KCC][0][25] = -2, [0][0][RTW89_ACMA][1][25] = 32, @@ -50471,6 +52652,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][25] = -8, [0][0][RTW89_UK][1][25] = 32, [0][0][RTW89_UK][0][25] = -8, + [0][0][RTW89_THAILAND][1][25] = 30, + [0][0][RTW89_THAILAND][0][25] = -18, [0][0][RTW89_FCC][1][27] = -18, [0][0][RTW89_FCC][2][27] = 54, [0][0][RTW89_ETSI][1][27] = 32, @@ -50478,6 +52661,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][27] = 30, [0][0][RTW89_MKK][0][27] = -8, [0][0][RTW89_IC][1][27] = -18, + [0][0][RTW89_IC][2][27] = 54, [0][0][RTW89_KCC][1][27] = -2, [0][0][RTW89_KCC][0][27] = -2, [0][0][RTW89_ACMA][1][27] = 32, @@ -50487,6 +52671,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][27] = -8, [0][0][RTW89_UK][1][27] = 32, [0][0][RTW89_UK][0][27] = -8, + [0][0][RTW89_THAILAND][1][27] = 30, + [0][0][RTW89_THAILAND][0][27] = -18, [0][0][RTW89_FCC][1][29] = -18, [0][0][RTW89_FCC][2][29] = 54, [0][0][RTW89_ETSI][1][29] = 32, @@ -50494,6 +52680,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][29] = 30, [0][0][RTW89_MKK][0][29] = -8, [0][0][RTW89_IC][1][29] = -18, + [0][0][RTW89_IC][2][29] = 54, [0][0][RTW89_KCC][1][29] = -2, [0][0][RTW89_KCC][0][29] = -2, [0][0][RTW89_ACMA][1][29] = 32, @@ -50503,6 +52690,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][29] = -8, [0][0][RTW89_UK][1][29] = 32, [0][0][RTW89_UK][0][29] = -8, + [0][0][RTW89_THAILAND][1][29] = 30, + [0][0][RTW89_THAILAND][0][29] = -18, [0][0][RTW89_FCC][1][30] = -18, [0][0][RTW89_FCC][2][30] = 54, [0][0][RTW89_ETSI][1][30] = 32, @@ -50510,6 +52699,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][30] = 30, [0][0][RTW89_MKK][0][30] = -8, [0][0][RTW89_IC][1][30] = -18, + [0][0][RTW89_IC][2][30] = 54, [0][0][RTW89_KCC][1][30] = -2, [0][0][RTW89_KCC][0][30] = -2, [0][0][RTW89_ACMA][1][30] = 32, @@ -50519,6 +52709,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][30] = -8, [0][0][RTW89_UK][1][30] = 32, [0][0][RTW89_UK][0][30] = -8, + [0][0][RTW89_THAILAND][1][30] = 30, + [0][0][RTW89_THAILAND][0][30] = -18, [0][0][RTW89_FCC][1][32] = -18, [0][0][RTW89_FCC][2][32] = 54, [0][0][RTW89_ETSI][1][32] = 32, @@ -50526,6 +52718,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][32] = 30, [0][0][RTW89_MKK][0][32] = -8, [0][0][RTW89_IC][1][32] = -18, + [0][0][RTW89_IC][2][32] = 54, 
[0][0][RTW89_KCC][1][32] = -2, [0][0][RTW89_KCC][0][32] = -2, [0][0][RTW89_ACMA][1][32] = 32, @@ -50535,6 +52728,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][32] = -8, [0][0][RTW89_UK][1][32] = 32, [0][0][RTW89_UK][0][32] = -8, + [0][0][RTW89_THAILAND][1][32] = 30, + [0][0][RTW89_THAILAND][0][32] = -18, [0][0][RTW89_FCC][1][34] = -18, [0][0][RTW89_FCC][2][34] = 54, [0][0][RTW89_ETSI][1][34] = 32, @@ -50542,6 +52737,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][34] = 30, [0][0][RTW89_MKK][0][34] = -8, [0][0][RTW89_IC][1][34] = -18, + [0][0][RTW89_IC][2][34] = 54, [0][0][RTW89_KCC][1][34] = -2, [0][0][RTW89_KCC][0][34] = -2, [0][0][RTW89_ACMA][1][34] = 32, @@ -50551,6 +52747,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][34] = -8, [0][0][RTW89_UK][1][34] = 32, [0][0][RTW89_UK][0][34] = -8, + [0][0][RTW89_THAILAND][1][34] = 30, + [0][0][RTW89_THAILAND][0][34] = -18, [0][0][RTW89_FCC][1][36] = -18, [0][0][RTW89_FCC][2][36] = 54, [0][0][RTW89_ETSI][1][36] = 32, @@ -50558,6 +52756,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][36] = 30, [0][0][RTW89_MKK][0][36] = -8, [0][0][RTW89_IC][1][36] = -18, + [0][0][RTW89_IC][2][36] = 54, [0][0][RTW89_KCC][1][36] = -2, [0][0][RTW89_KCC][0][36] = -2, [0][0][RTW89_ACMA][1][36] = 32, @@ -50567,6 +52766,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][36] = -8, [0][0][RTW89_UK][1][36] = 32, [0][0][RTW89_UK][0][36] = -8, + [0][0][RTW89_THAILAND][1][36] = 30, + [0][0][RTW89_THAILAND][0][36] = -18, [0][0][RTW89_FCC][1][38] = -18, [0][0][RTW89_FCC][2][38] = 54, [0][0][RTW89_ETSI][1][38] = 32, @@ -50574,6 +52775,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][38] = 30, [0][0][RTW89_MKK][0][38] = -8, [0][0][RTW89_IC][1][38] = -18, + [0][0][RTW89_IC][2][38] = 54, [0][0][RTW89_KCC][1][38] = -2, [0][0][RTW89_KCC][0][38] = -2, [0][0][RTW89_ACMA][1][38] = 32, @@ -50583,6 +52785,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][38] = -8, [0][0][RTW89_UK][1][38] = 32, [0][0][RTW89_UK][0][38] = -8, + [0][0][RTW89_THAILAND][1][38] = 30, + [0][0][RTW89_THAILAND][0][38] = -18, [0][0][RTW89_FCC][1][40] = -18, [0][0][RTW89_FCC][2][40] = 54, [0][0][RTW89_ETSI][1][40] = 32, @@ -50590,6 +52794,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][40] = 30, [0][0][RTW89_MKK][0][40] = -8, [0][0][RTW89_IC][1][40] = -18, + [0][0][RTW89_IC][2][40] = 54, [0][0][RTW89_KCC][1][40] = -2, [0][0][RTW89_KCC][0][40] = -2, [0][0][RTW89_ACMA][1][40] = 32, @@ -50599,6 +52804,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][40] = -8, [0][0][RTW89_UK][1][40] = 32, [0][0][RTW89_UK][0][40] = -8, + [0][0][RTW89_THAILAND][1][40] = 30, + [0][0][RTW89_THAILAND][0][40] = -18, [0][0][RTW89_FCC][1][42] = -18, [0][0][RTW89_FCC][2][42] = 54, [0][0][RTW89_ETSI][1][42] = 32, @@ -50606,6 +52813,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][42] = 30, [0][0][RTW89_MKK][0][42] = -8, [0][0][RTW89_IC][1][42] = -18, + [0][0][RTW89_IC][2][42] = 54, [0][0][RTW89_KCC][1][42] = -2, [0][0][RTW89_KCC][0][42] = -2, [0][0][RTW89_ACMA][1][42] = 32, @@ -50615,6 +52823,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][42] = -8, 
[0][0][RTW89_UK][1][42] = 32, [0][0][RTW89_UK][0][42] = -8, + [0][0][RTW89_THAILAND][1][42] = 30, + [0][0][RTW89_THAILAND][0][42] = -18, [0][0][RTW89_FCC][1][44] = -16, [0][0][RTW89_FCC][2][44] = 56, [0][0][RTW89_ETSI][1][44] = 32, @@ -50622,6 +52832,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][44] = 8, [0][0][RTW89_MKK][0][44] = -10, [0][0][RTW89_IC][1][44] = -16, + [0][0][RTW89_IC][2][44] = 56, [0][0][RTW89_KCC][1][44] = -2, [0][0][RTW89_KCC][0][44] = -2, [0][0][RTW89_ACMA][1][44] = 32, @@ -50631,6 +52842,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][44] = -6, [0][0][RTW89_UK][1][44] = 32, [0][0][RTW89_UK][0][44] = -6, + [0][0][RTW89_THAILAND][1][44] = 30, + [0][0][RTW89_THAILAND][0][44] = -16, [0][0][RTW89_FCC][1][45] = -16, [0][0][RTW89_FCC][2][45] = 127, [0][0][RTW89_ETSI][1][45] = 127, @@ -50638,6 +52851,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][45] = 127, [0][0][RTW89_MKK][0][45] = 127, [0][0][RTW89_IC][1][45] = -16, + [0][0][RTW89_IC][2][45] = 56, [0][0][RTW89_KCC][1][45] = -2, [0][0][RTW89_KCC][0][45] = 127, [0][0][RTW89_ACMA][1][45] = 127, @@ -50647,6 +52861,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][45] = 127, [0][0][RTW89_UK][1][45] = 127, [0][0][RTW89_UK][0][45] = 127, + [0][0][RTW89_THAILAND][1][45] = 127, + [0][0][RTW89_THAILAND][0][45] = 127, [0][0][RTW89_FCC][1][47] = -18, [0][0][RTW89_FCC][2][47] = 127, [0][0][RTW89_ETSI][1][47] = 127, @@ -50654,6 +52870,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][47] = 127, [0][0][RTW89_MKK][0][47] = 127, [0][0][RTW89_IC][1][47] = -18, + [0][0][RTW89_IC][2][47] = 56, [0][0][RTW89_KCC][1][47] = -2, [0][0][RTW89_KCC][0][47] = 127, [0][0][RTW89_ACMA][1][47] = 127, @@ -50663,6 +52880,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][47] = 127, [0][0][RTW89_UK][1][47] = 127, [0][0][RTW89_UK][0][47] = 127, + [0][0][RTW89_THAILAND][1][47] = 127, + [0][0][RTW89_THAILAND][0][47] = 127, [0][0][RTW89_FCC][1][49] = -18, [0][0][RTW89_FCC][2][49] = 127, [0][0][RTW89_ETSI][1][49] = 127, @@ -50670,6 +52889,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][49] = 127, [0][0][RTW89_MKK][0][49] = 127, [0][0][RTW89_IC][1][49] = -18, + [0][0][RTW89_IC][2][49] = 56, [0][0][RTW89_KCC][1][49] = -2, [0][0][RTW89_KCC][0][49] = 127, [0][0][RTW89_ACMA][1][49] = 127, @@ -50679,6 +52899,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][49] = 127, [0][0][RTW89_UK][1][49] = 127, [0][0][RTW89_UK][0][49] = 127, + [0][0][RTW89_THAILAND][1][49] = 127, + [0][0][RTW89_THAILAND][0][49] = 127, [0][0][RTW89_FCC][1][51] = -18, [0][0][RTW89_FCC][2][51] = 127, [0][0][RTW89_ETSI][1][51] = 127, @@ -50686,6 +52908,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][51] = 127, [0][0][RTW89_MKK][0][51] = 127, [0][0][RTW89_IC][1][51] = -18, + [0][0][RTW89_IC][2][51] = 56, [0][0][RTW89_KCC][1][51] = -2, [0][0][RTW89_KCC][0][51] = 127, [0][0][RTW89_ACMA][1][51] = 127, @@ -50695,6 +52918,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][51] = 127, [0][0][RTW89_UK][1][51] = 127, [0][0][RTW89_UK][0][51] = 127, + [0][0][RTW89_THAILAND][1][51] = 127, + [0][0][RTW89_THAILAND][0][51] = 127, [0][0][RTW89_FCC][1][53] = -16, [0][0][RTW89_FCC][2][53] 
= 127, [0][0][RTW89_ETSI][1][53] = 127, @@ -50702,6 +52927,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][53] = 127, [0][0][RTW89_MKK][0][53] = 127, [0][0][RTW89_IC][1][53] = -16, + [0][0][RTW89_IC][2][53] = 56, [0][0][RTW89_KCC][1][53] = -2, [0][0][RTW89_KCC][0][53] = 127, [0][0][RTW89_ACMA][1][53] = 127, @@ -50711,6 +52937,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][53] = 127, [0][0][RTW89_UK][1][53] = 127, [0][0][RTW89_UK][0][53] = 127, + [0][0][RTW89_THAILAND][1][53] = 127, + [0][0][RTW89_THAILAND][0][53] = 127, [0][0][RTW89_FCC][1][55] = -18, [0][0][RTW89_FCC][2][55] = 56, [0][0][RTW89_ETSI][1][55] = 127, @@ -50718,6 +52946,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][55] = 127, [0][0][RTW89_MKK][0][55] = 127, [0][0][RTW89_IC][1][55] = -18, + [0][0][RTW89_IC][2][55] = 56, [0][0][RTW89_KCC][1][55] = -2, [0][0][RTW89_KCC][0][55] = 127, [0][0][RTW89_ACMA][1][55] = 127, @@ -50727,6 +52956,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][55] = 127, [0][0][RTW89_UK][1][55] = 127, [0][0][RTW89_UK][0][55] = 127, + [0][0][RTW89_THAILAND][1][55] = 127, + [0][0][RTW89_THAILAND][0][55] = 127, [0][0][RTW89_FCC][1][57] = -18, [0][0][RTW89_FCC][2][57] = 56, [0][0][RTW89_ETSI][1][57] = 127, @@ -50734,6 +52965,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][57] = 127, [0][0][RTW89_MKK][0][57] = 127, [0][0][RTW89_IC][1][57] = -18, + [0][0][RTW89_IC][2][57] = 56, [0][0][RTW89_KCC][1][57] = -2, [0][0][RTW89_KCC][0][57] = 127, [0][0][RTW89_ACMA][1][57] = 127, @@ -50743,6 +52975,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][57] = 127, [0][0][RTW89_UK][1][57] = 127, [0][0][RTW89_UK][0][57] = 127, + [0][0][RTW89_THAILAND][1][57] = 127, + [0][0][RTW89_THAILAND][0][57] = 127, [0][0][RTW89_FCC][1][59] = -18, [0][0][RTW89_FCC][2][59] = 56, [0][0][RTW89_ETSI][1][59] = 127, @@ -50750,6 +52984,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][59] = 127, [0][0][RTW89_MKK][0][59] = 127, [0][0][RTW89_IC][1][59] = -18, + [0][0][RTW89_IC][2][59] = 56, [0][0][RTW89_KCC][1][59] = -2, [0][0][RTW89_KCC][0][59] = 127, [0][0][RTW89_ACMA][1][59] = 127, @@ -50759,6 +52994,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][59] = 127, [0][0][RTW89_UK][1][59] = 127, [0][0][RTW89_UK][0][59] = 127, + [0][0][RTW89_THAILAND][1][59] = 127, + [0][0][RTW89_THAILAND][0][59] = 127, [0][0][RTW89_FCC][1][60] = -18, [0][0][RTW89_FCC][2][60] = 56, [0][0][RTW89_ETSI][1][60] = 127, @@ -50766,6 +53003,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][60] = 127, [0][0][RTW89_MKK][0][60] = 127, [0][0][RTW89_IC][1][60] = -18, + [0][0][RTW89_IC][2][60] = 56, [0][0][RTW89_KCC][1][60] = -2, [0][0][RTW89_KCC][0][60] = 127, [0][0][RTW89_ACMA][1][60] = 127, @@ -50775,6 +53013,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][60] = 127, [0][0][RTW89_UK][1][60] = 127, [0][0][RTW89_UK][0][60] = 127, + [0][0][RTW89_THAILAND][1][60] = 127, + [0][0][RTW89_THAILAND][0][60] = 127, [0][0][RTW89_FCC][1][62] = -18, [0][0][RTW89_FCC][2][62] = 56, [0][0][RTW89_ETSI][1][62] = 127, @@ -50782,6 +53022,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][62] = 127, [0][0][RTW89_MKK][0][62] = 
127, [0][0][RTW89_IC][1][62] = -18, + [0][0][RTW89_IC][2][62] = 56, [0][0][RTW89_KCC][1][62] = -2, [0][0][RTW89_KCC][0][62] = 127, [0][0][RTW89_ACMA][1][62] = 127, @@ -50791,6 +53032,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][62] = 127, [0][0][RTW89_UK][1][62] = 127, [0][0][RTW89_UK][0][62] = 127, + [0][0][RTW89_THAILAND][1][62] = 127, + [0][0][RTW89_THAILAND][0][62] = 127, [0][0][RTW89_FCC][1][64] = -18, [0][0][RTW89_FCC][2][64] = 56, [0][0][RTW89_ETSI][1][64] = 127, @@ -50798,6 +53041,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][64] = 127, [0][0][RTW89_MKK][0][64] = 127, [0][0][RTW89_IC][1][64] = -18, + [0][0][RTW89_IC][2][64] = 56, [0][0][RTW89_KCC][1][64] = -2, [0][0][RTW89_KCC][0][64] = 127, [0][0][RTW89_ACMA][1][64] = 127, @@ -50807,6 +53051,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][64] = 127, [0][0][RTW89_UK][1][64] = 127, [0][0][RTW89_UK][0][64] = 127, + [0][0][RTW89_THAILAND][1][64] = 127, + [0][0][RTW89_THAILAND][0][64] = 127, [0][0][RTW89_FCC][1][66] = -18, [0][0][RTW89_FCC][2][66] = 56, [0][0][RTW89_ETSI][1][66] = 127, @@ -50814,6 +53060,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][66] = 127, [0][0][RTW89_MKK][0][66] = 127, [0][0][RTW89_IC][1][66] = -18, + [0][0][RTW89_IC][2][66] = 56, [0][0][RTW89_KCC][1][66] = -2, [0][0][RTW89_KCC][0][66] = 127, [0][0][RTW89_ACMA][1][66] = 127, @@ -50823,6 +53070,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][66] = 127, [0][0][RTW89_UK][1][66] = 127, [0][0][RTW89_UK][0][66] = 127, + [0][0][RTW89_THAILAND][1][66] = 127, + [0][0][RTW89_THAILAND][0][66] = 127, [0][0][RTW89_FCC][1][68] = -18, [0][0][RTW89_FCC][2][68] = 56, [0][0][RTW89_ETSI][1][68] = 127, @@ -50830,6 +53079,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][68] = 127, [0][0][RTW89_MKK][0][68] = 127, [0][0][RTW89_IC][1][68] = -18, + [0][0][RTW89_IC][2][68] = 56, [0][0][RTW89_KCC][1][68] = -2, [0][0][RTW89_KCC][0][68] = 127, [0][0][RTW89_ACMA][1][68] = 127, @@ -50839,6 +53089,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][68] = 127, [0][0][RTW89_UK][1][68] = 127, [0][0][RTW89_UK][0][68] = 127, + [0][0][RTW89_THAILAND][1][68] = 127, + [0][0][RTW89_THAILAND][0][68] = 127, [0][0][RTW89_FCC][1][70] = -16, [0][0][RTW89_FCC][2][70] = 56, [0][0][RTW89_ETSI][1][70] = 127, @@ -50846,6 +53098,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][70] = 127, [0][0][RTW89_MKK][0][70] = 127, [0][0][RTW89_IC][1][70] = -16, + [0][0][RTW89_IC][2][70] = 56, [0][0][RTW89_KCC][1][70] = -2, [0][0][RTW89_KCC][0][70] = 127, [0][0][RTW89_ACMA][1][70] = 127, @@ -50855,6 +53108,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][70] = 127, [0][0][RTW89_UK][1][70] = 127, [0][0][RTW89_UK][0][70] = 127, + [0][0][RTW89_THAILAND][1][70] = 127, + [0][0][RTW89_THAILAND][0][70] = 127, [0][0][RTW89_FCC][1][72] = -18, [0][0][RTW89_FCC][2][72] = 56, [0][0][RTW89_ETSI][1][72] = 127, @@ -50862,6 +53117,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][72] = 127, [0][0][RTW89_MKK][0][72] = 127, [0][0][RTW89_IC][1][72] = -18, + [0][0][RTW89_IC][2][72] = 56, [0][0][RTW89_KCC][1][72] = -2, [0][0][RTW89_KCC][0][72] = 127, [0][0][RTW89_ACMA][1][72] = 127, @@ -50871,6 +53127,8 @@ 
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][72] = 127, [0][0][RTW89_UK][1][72] = 127, [0][0][RTW89_UK][0][72] = 127, + [0][0][RTW89_THAILAND][1][72] = 127, + [0][0][RTW89_THAILAND][0][72] = 127, [0][0][RTW89_FCC][1][74] = -18, [0][0][RTW89_FCC][2][74] = 56, [0][0][RTW89_ETSI][1][74] = 127, @@ -50878,6 +53136,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][74] = 127, [0][0][RTW89_MKK][0][74] = 127, [0][0][RTW89_IC][1][74] = -18, + [0][0][RTW89_IC][2][74] = 56, [0][0][RTW89_KCC][1][74] = -2, [0][0][RTW89_KCC][0][74] = 127, [0][0][RTW89_ACMA][1][74] = 127, @@ -50887,6 +53146,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][74] = 127, [0][0][RTW89_UK][1][74] = 127, [0][0][RTW89_UK][0][74] = 127, + [0][0][RTW89_THAILAND][1][74] = 127, + [0][0][RTW89_THAILAND][0][74] = 127, [0][0][RTW89_FCC][1][75] = -18, [0][0][RTW89_FCC][2][75] = 56, [0][0][RTW89_ETSI][1][75] = 127, @@ -50894,6 +53155,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][75] = 127, [0][0][RTW89_MKK][0][75] = 127, [0][0][RTW89_IC][1][75] = -18, + [0][0][RTW89_IC][2][75] = 56, [0][0][RTW89_KCC][1][75] = -2, [0][0][RTW89_KCC][0][75] = 127, [0][0][RTW89_ACMA][1][75] = 127, @@ -50903,6 +53165,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][75] = 127, [0][0][RTW89_UK][1][75] = 127, [0][0][RTW89_UK][0][75] = 127, + [0][0][RTW89_THAILAND][1][75] = 127, + [0][0][RTW89_THAILAND][0][75] = 127, [0][0][RTW89_FCC][1][77] = -18, [0][0][RTW89_FCC][2][77] = 56, [0][0][RTW89_ETSI][1][77] = 127, @@ -50910,6 +53174,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][77] = 127, [0][0][RTW89_MKK][0][77] = 127, [0][0][RTW89_IC][1][77] = -18, + [0][0][RTW89_IC][2][77] = 56, [0][0][RTW89_KCC][1][77] = -2, [0][0][RTW89_KCC][0][77] = 127, [0][0][RTW89_ACMA][1][77] = 127, @@ -50919,6 +53184,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][77] = 127, [0][0][RTW89_UK][1][77] = 127, [0][0][RTW89_UK][0][77] = 127, + [0][0][RTW89_THAILAND][1][77] = 127, + [0][0][RTW89_THAILAND][0][77] = 127, [0][0][RTW89_FCC][1][79] = -18, [0][0][RTW89_FCC][2][79] = 56, [0][0][RTW89_ETSI][1][79] = 127, @@ -50926,6 +53193,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][79] = 127, [0][0][RTW89_MKK][0][79] = 127, [0][0][RTW89_IC][1][79] = -18, + [0][0][RTW89_IC][2][79] = 56, [0][0][RTW89_KCC][1][79] = -2, [0][0][RTW89_KCC][0][79] = 127, [0][0][RTW89_ACMA][1][79] = 127, @@ -50935,6 +53203,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][79] = 127, [0][0][RTW89_UK][1][79] = 127, [0][0][RTW89_UK][0][79] = 127, + [0][0][RTW89_THAILAND][1][79] = 127, + [0][0][RTW89_THAILAND][0][79] = 127, [0][0][RTW89_FCC][1][81] = -18, [0][0][RTW89_FCC][2][81] = 56, [0][0][RTW89_ETSI][1][81] = 127, @@ -50942,6 +53212,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][81] = 127, [0][0][RTW89_MKK][0][81] = 127, [0][0][RTW89_IC][1][81] = -18, + [0][0][RTW89_IC][2][81] = 56, [0][0][RTW89_KCC][1][81] = -2, [0][0][RTW89_KCC][0][81] = 127, [0][0][RTW89_ACMA][1][81] = 127, @@ -50951,6 +53222,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][81] = 127, [0][0][RTW89_UK][1][81] = 127, [0][0][RTW89_UK][0][81] = 127, + 
[0][0][RTW89_THAILAND][1][81] = 127, + [0][0][RTW89_THAILAND][0][81] = 127, [0][0][RTW89_FCC][1][83] = -18, [0][0][RTW89_FCC][2][83] = 56, [0][0][RTW89_ETSI][1][83] = 127, @@ -50958,6 +53231,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][83] = 127, [0][0][RTW89_MKK][0][83] = 127, [0][0][RTW89_IC][1][83] = -18, + [0][0][RTW89_IC][2][83] = 56, [0][0][RTW89_KCC][1][83] = -2, [0][0][RTW89_KCC][0][83] = 127, [0][0][RTW89_ACMA][1][83] = 127, @@ -50967,6 +53241,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][83] = 127, [0][0][RTW89_UK][1][83] = 127, [0][0][RTW89_UK][0][83] = 127, + [0][0][RTW89_THAILAND][1][83] = 127, + [0][0][RTW89_THAILAND][0][83] = 127, [0][0][RTW89_FCC][1][85] = -18, [0][0][RTW89_FCC][2][85] = 56, [0][0][RTW89_ETSI][1][85] = 127, @@ -50974,6 +53250,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][85] = 127, [0][0][RTW89_MKK][0][85] = 127, [0][0][RTW89_IC][1][85] = -18, + [0][0][RTW89_IC][2][85] = 56, [0][0][RTW89_KCC][1][85] = -2, [0][0][RTW89_KCC][0][85] = 127, [0][0][RTW89_ACMA][1][85] = 127, @@ -50983,6 +53260,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][85] = 127, [0][0][RTW89_UK][1][85] = 127, [0][0][RTW89_UK][0][85] = 127, + [0][0][RTW89_THAILAND][1][85] = 127, + [0][0][RTW89_THAILAND][0][85] = 127, [0][0][RTW89_FCC][1][87] = -16, [0][0][RTW89_FCC][2][87] = 127, [0][0][RTW89_ETSI][1][87] = 127, @@ -50990,6 +53269,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][87] = 127, [0][0][RTW89_MKK][0][87] = 127, [0][0][RTW89_IC][1][87] = -16, + [0][0][RTW89_IC][2][87] = 127, [0][0][RTW89_KCC][1][87] = -2, [0][0][RTW89_KCC][0][87] = 127, [0][0][RTW89_ACMA][1][87] = 127, @@ -50999,6 +53279,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][87] = 127, [0][0][RTW89_UK][1][87] = 127, [0][0][RTW89_UK][0][87] = 127, + [0][0][RTW89_THAILAND][1][87] = 127, + [0][0][RTW89_THAILAND][0][87] = 127, [0][0][RTW89_FCC][1][89] = -16, [0][0][RTW89_FCC][2][89] = 127, [0][0][RTW89_ETSI][1][89] = 127, @@ -51006,6 +53288,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][89] = 127, [0][0][RTW89_MKK][0][89] = 127, [0][0][RTW89_IC][1][89] = -16, + [0][0][RTW89_IC][2][89] = 127, [0][0][RTW89_KCC][1][89] = -2, [0][0][RTW89_KCC][0][89] = 127, [0][0][RTW89_ACMA][1][89] = 127, @@ -51015,6 +53298,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][89] = 127, [0][0][RTW89_UK][1][89] = 127, [0][0][RTW89_UK][0][89] = 127, + [0][0][RTW89_THAILAND][1][89] = 127, + [0][0][RTW89_THAILAND][0][89] = 127, [0][0][RTW89_FCC][1][90] = -16, [0][0][RTW89_FCC][2][90] = 127, [0][0][RTW89_ETSI][1][90] = 127, @@ -51022,6 +53307,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][90] = 127, [0][0][RTW89_MKK][0][90] = 127, [0][0][RTW89_IC][1][90] = -16, + [0][0][RTW89_IC][2][90] = 127, [0][0][RTW89_KCC][1][90] = -2, [0][0][RTW89_KCC][0][90] = 127, [0][0][RTW89_ACMA][1][90] = 127, @@ -51031,6 +53317,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][90] = 127, [0][0][RTW89_UK][1][90] = 127, [0][0][RTW89_UK][0][90] = 127, + [0][0][RTW89_THAILAND][1][90] = 127, + [0][0][RTW89_THAILAND][0][90] = 127, [0][0][RTW89_FCC][1][92] = -16, [0][0][RTW89_FCC][2][92] = 127, [0][0][RTW89_ETSI][1][92] = 127, @@ 
-51038,6 +53326,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][92] = 127, [0][0][RTW89_MKK][0][92] = 127, [0][0][RTW89_IC][1][92] = -16, + [0][0][RTW89_IC][2][92] = 127, [0][0][RTW89_KCC][1][92] = -2, [0][0][RTW89_KCC][0][92] = 127, [0][0][RTW89_ACMA][1][92] = 127, @@ -51047,6 +53336,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][92] = 127, [0][0][RTW89_UK][1][92] = 127, [0][0][RTW89_UK][0][92] = 127, + [0][0][RTW89_THAILAND][1][92] = 127, + [0][0][RTW89_THAILAND][0][92] = 127, [0][0][RTW89_FCC][1][94] = -16, [0][0][RTW89_FCC][2][94] = 127, [0][0][RTW89_ETSI][1][94] = 127, @@ -51054,6 +53345,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][94] = 127, [0][0][RTW89_MKK][0][94] = 127, [0][0][RTW89_IC][1][94] = -16, + [0][0][RTW89_IC][2][94] = 127, [0][0][RTW89_KCC][1][94] = -2, [0][0][RTW89_KCC][0][94] = 127, [0][0][RTW89_ACMA][1][94] = 127, @@ -51063,6 +53355,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][94] = 127, [0][0][RTW89_UK][1][94] = 127, [0][0][RTW89_UK][0][94] = 127, + [0][0][RTW89_THAILAND][1][94] = 127, + [0][0][RTW89_THAILAND][0][94] = 127, [0][0][RTW89_FCC][1][96] = -16, [0][0][RTW89_FCC][2][96] = 127, [0][0][RTW89_ETSI][1][96] = 127, @@ -51070,6 +53364,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][96] = 127, [0][0][RTW89_MKK][0][96] = 127, [0][0][RTW89_IC][1][96] = -16, + [0][0][RTW89_IC][2][96] = 127, [0][0][RTW89_KCC][1][96] = -2, [0][0][RTW89_KCC][0][96] = 127, [0][0][RTW89_ACMA][1][96] = 127, @@ -51079,6 +53374,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][96] = 127, [0][0][RTW89_UK][1][96] = 127, [0][0][RTW89_UK][0][96] = 127, + [0][0][RTW89_THAILAND][1][96] = 127, + [0][0][RTW89_THAILAND][0][96] = 127, [0][0][RTW89_FCC][1][98] = -16, [0][0][RTW89_FCC][2][98] = 127, [0][0][RTW89_ETSI][1][98] = 127, @@ -51086,6 +53383,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][98] = 127, [0][0][RTW89_MKK][0][98] = 127, [0][0][RTW89_IC][1][98] = -16, + [0][0][RTW89_IC][2][98] = 127, [0][0][RTW89_KCC][1][98] = -2, [0][0][RTW89_KCC][0][98] = 127, [0][0][RTW89_ACMA][1][98] = 127, @@ -51095,6 +53393,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][98] = 127, [0][0][RTW89_UK][1][98] = 127, [0][0][RTW89_UK][0][98] = 127, + [0][0][RTW89_THAILAND][1][98] = 127, + [0][0][RTW89_THAILAND][0][98] = 127, [0][0][RTW89_FCC][1][100] = -16, [0][0][RTW89_FCC][2][100] = 127, [0][0][RTW89_ETSI][1][100] = 127, @@ -51102,6 +53402,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][100] = 127, [0][0][RTW89_MKK][0][100] = 127, [0][0][RTW89_IC][1][100] = -16, + [0][0][RTW89_IC][2][100] = 127, [0][0][RTW89_KCC][1][100] = -2, [0][0][RTW89_KCC][0][100] = 127, [0][0][RTW89_ACMA][1][100] = 127, @@ -51111,6 +53412,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][100] = 127, [0][0][RTW89_UK][1][100] = 127, [0][0][RTW89_UK][0][100] = 127, + [0][0][RTW89_THAILAND][1][100] = 127, + [0][0][RTW89_THAILAND][0][100] = 127, [0][0][RTW89_FCC][1][102] = -16, [0][0][RTW89_FCC][2][102] = 127, [0][0][RTW89_ETSI][1][102] = 127, @@ -51118,6 +53421,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][102] = 127, [0][0][RTW89_MKK][0][102] = 127, 
[0][0][RTW89_IC][1][102] = -16, + [0][0][RTW89_IC][2][102] = 127, [0][0][RTW89_KCC][1][102] = -2, [0][0][RTW89_KCC][0][102] = 127, [0][0][RTW89_ACMA][1][102] = 127, @@ -51127,6 +53431,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][102] = 127, [0][0][RTW89_UK][1][102] = 127, [0][0][RTW89_UK][0][102] = 127, + [0][0][RTW89_THAILAND][1][102] = 127, + [0][0][RTW89_THAILAND][0][102] = 127, [0][0][RTW89_FCC][1][104] = -16, [0][0][RTW89_FCC][2][104] = 127, [0][0][RTW89_ETSI][1][104] = 127, @@ -51134,6 +53440,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][104] = 127, [0][0][RTW89_MKK][0][104] = 127, [0][0][RTW89_IC][1][104] = -16, + [0][0][RTW89_IC][2][104] = 127, [0][0][RTW89_KCC][1][104] = -2, [0][0][RTW89_KCC][0][104] = 127, [0][0][RTW89_ACMA][1][104] = 127, @@ -51143,6 +53450,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][104] = 127, [0][0][RTW89_UK][1][104] = 127, [0][0][RTW89_UK][0][104] = 127, + [0][0][RTW89_THAILAND][1][104] = 127, + [0][0][RTW89_THAILAND][0][104] = 127, [0][0][RTW89_FCC][1][105] = -16, [0][0][RTW89_FCC][2][105] = 127, [0][0][RTW89_ETSI][1][105] = 127, @@ -51150,6 +53459,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][105] = 127, [0][0][RTW89_MKK][0][105] = 127, [0][0][RTW89_IC][1][105] = -16, + [0][0][RTW89_IC][2][105] = 127, [0][0][RTW89_KCC][1][105] = -2, [0][0][RTW89_KCC][0][105] = 127, [0][0][RTW89_ACMA][1][105] = 127, @@ -51159,6 +53469,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][105] = 127, [0][0][RTW89_UK][1][105] = 127, [0][0][RTW89_UK][0][105] = 127, + [0][0][RTW89_THAILAND][1][105] = 127, + [0][0][RTW89_THAILAND][0][105] = 127, [0][0][RTW89_FCC][1][107] = -12, [0][0][RTW89_FCC][2][107] = 127, [0][0][RTW89_ETSI][1][107] = 127, @@ -51166,6 +53478,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][107] = 127, [0][0][RTW89_MKK][0][107] = 127, [0][0][RTW89_IC][1][107] = -12, + [0][0][RTW89_IC][2][107] = 127, [0][0][RTW89_KCC][1][107] = -2, [0][0][RTW89_KCC][0][107] = 127, [0][0][RTW89_ACMA][1][107] = 127, @@ -51175,6 +53488,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][107] = 127, [0][0][RTW89_UK][1][107] = 127, [0][0][RTW89_UK][0][107] = 127, + [0][0][RTW89_THAILAND][1][107] = 127, + [0][0][RTW89_THAILAND][0][107] = 127, [0][0][RTW89_FCC][1][109] = -12, [0][0][RTW89_FCC][2][109] = 127, [0][0][RTW89_ETSI][1][109] = 127, @@ -51182,6 +53497,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][109] = 127, [0][0][RTW89_MKK][0][109] = 127, [0][0][RTW89_IC][1][109] = -12, + [0][0][RTW89_IC][2][109] = 127, [0][0][RTW89_KCC][1][109] = 127, [0][0][RTW89_KCC][0][109] = 127, [0][0][RTW89_ACMA][1][109] = 127, @@ -51191,6 +53507,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][109] = 127, [0][0][RTW89_UK][1][109] = 127, [0][0][RTW89_UK][0][109] = 127, + [0][0][RTW89_THAILAND][1][109] = 127, + [0][0][RTW89_THAILAND][0][109] = 127, [0][0][RTW89_FCC][1][111] = 127, [0][0][RTW89_FCC][2][111] = 127, [0][0][RTW89_ETSI][1][111] = 127, @@ -51198,6 +53516,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][111] = 127, [0][0][RTW89_MKK][0][111] = 127, [0][0][RTW89_IC][1][111] = 127, + [0][0][RTW89_IC][2][111] = 127, [0][0][RTW89_KCC][1][111] = 127, 
[0][0][RTW89_KCC][0][111] = 127, [0][0][RTW89_ACMA][1][111] = 127, @@ -51207,6 +53526,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][111] = 127, [0][0][RTW89_UK][1][111] = 127, [0][0][RTW89_UK][0][111] = 127, + [0][0][RTW89_THAILAND][1][111] = 127, + [0][0][RTW89_THAILAND][0][111] = 127, [0][0][RTW89_FCC][1][113] = 127, [0][0][RTW89_FCC][2][113] = 127, [0][0][RTW89_ETSI][1][113] = 127, @@ -51214,6 +53535,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][113] = 127, [0][0][RTW89_MKK][0][113] = 127, [0][0][RTW89_IC][1][113] = 127, + [0][0][RTW89_IC][2][113] = 127, [0][0][RTW89_KCC][1][113] = 127, [0][0][RTW89_KCC][0][113] = 127, [0][0][RTW89_ACMA][1][113] = 127, @@ -51223,6 +53545,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][113] = 127, [0][0][RTW89_UK][1][113] = 127, [0][0][RTW89_UK][0][113] = 127, + [0][0][RTW89_THAILAND][1][113] = 127, + [0][0][RTW89_THAILAND][0][113] = 127, [0][0][RTW89_FCC][1][115] = 127, [0][0][RTW89_FCC][2][115] = 127, [0][0][RTW89_ETSI][1][115] = 127, @@ -51230,6 +53554,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][115] = 127, [0][0][RTW89_MKK][0][115] = 127, [0][0][RTW89_IC][1][115] = 127, + [0][0][RTW89_IC][2][115] = 127, [0][0][RTW89_KCC][1][115] = 127, [0][0][RTW89_KCC][0][115] = 127, [0][0][RTW89_ACMA][1][115] = 127, @@ -51239,6 +53564,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][115] = 127, [0][0][RTW89_UK][1][115] = 127, [0][0][RTW89_UK][0][115] = 127, + [0][0][RTW89_THAILAND][1][115] = 127, + [0][0][RTW89_THAILAND][0][115] = 127, [0][0][RTW89_FCC][1][117] = 127, [0][0][RTW89_FCC][2][117] = 127, [0][0][RTW89_ETSI][1][117] = 127, @@ -51246,6 +53573,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][117] = 127, [0][0][RTW89_MKK][0][117] = 127, [0][0][RTW89_IC][1][117] = 127, + [0][0][RTW89_IC][2][117] = 127, [0][0][RTW89_KCC][1][117] = 127, [0][0][RTW89_KCC][0][117] = 127, [0][0][RTW89_ACMA][1][117] = 127, @@ -51255,6 +53583,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][117] = 127, [0][0][RTW89_UK][1][117] = 127, [0][0][RTW89_UK][0][117] = 127, + [0][0][RTW89_THAILAND][1][117] = 127, + [0][0][RTW89_THAILAND][0][117] = 127, [0][0][RTW89_FCC][1][119] = 127, [0][0][RTW89_FCC][2][119] = 127, [0][0][RTW89_ETSI][1][119] = 127, @@ -51262,6 +53592,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][119] = 127, [0][0][RTW89_MKK][0][119] = 127, [0][0][RTW89_IC][1][119] = 127, + [0][0][RTW89_IC][2][119] = 127, [0][0][RTW89_KCC][1][119] = 127, [0][0][RTW89_KCC][0][119] = 127, [0][0][RTW89_ACMA][1][119] = 127, @@ -51271,6 +53602,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][119] = 127, [0][0][RTW89_UK][1][119] = 127, [0][0][RTW89_UK][0][119] = 127, + [0][0][RTW89_THAILAND][1][119] = 127, + [0][0][RTW89_THAILAND][0][119] = 127, [0][1][RTW89_FCC][1][0] = -40, [0][1][RTW89_FCC][2][0] = 32, [0][1][RTW89_ETSI][1][0] = 20, @@ -51278,6 +53611,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][0] = 18, [0][1][RTW89_MKK][0][0] = -20, [0][1][RTW89_IC][1][0] = -40, + [0][1][RTW89_IC][2][0] = 32, [0][1][RTW89_KCC][1][0] = -14, [0][1][RTW89_KCC][0][0] = -14, [0][1][RTW89_ACMA][1][0] = 20, @@ -51287,6 +53621,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][0] = -18, [0][1][RTW89_UK][1][0] = 20, [0][1][RTW89_UK][0][0] = -18, + [0][1][RTW89_THAILAND][1][0] = 6, + [0][1][RTW89_THAILAND][0][0] = -40, [0][1][RTW89_FCC][1][2] = -40, [0][1][RTW89_FCC][2][2] = 32, [0][1][RTW89_ETSI][1][2] = 20, @@ -51294,6 +53630,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][2] = 18, [0][1][RTW89_MKK][0][2] = -22, [0][1][RTW89_IC][1][2] = -40, + [0][1][RTW89_IC][2][2] = 32, [0][1][RTW89_KCC][1][2] = -14, [0][1][RTW89_KCC][0][2] = -14, [0][1][RTW89_ACMA][1][2] = 20, @@ -51303,6 +53640,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][2] = -18, [0][1][RTW89_UK][1][2] = 20, [0][1][RTW89_UK][0][2] = -18, + [0][1][RTW89_THAILAND][1][2] = 6, + [0][1][RTW89_THAILAND][0][2] = -40, [0][1][RTW89_FCC][1][4] = -40, [0][1][RTW89_FCC][2][4] = 32, [0][1][RTW89_ETSI][1][4] = 20, @@ -51310,6 +53649,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][4] = 18, [0][1][RTW89_MKK][0][4] = -22, [0][1][RTW89_IC][1][4] = -40, + [0][1][RTW89_IC][2][4] = 32, [0][1][RTW89_KCC][1][4] = -14, [0][1][RTW89_KCC][0][4] = -14, [0][1][RTW89_ACMA][1][4] = 20, @@ -51319,6 +53659,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][4] = -18, [0][1][RTW89_UK][1][4] = 20, [0][1][RTW89_UK][0][4] = -18, + [0][1][RTW89_THAILAND][1][4] = 6, + [0][1][RTW89_THAILAND][0][4] = -40, [0][1][RTW89_FCC][1][6] = -40, [0][1][RTW89_FCC][2][6] = 32, [0][1][RTW89_ETSI][1][6] = 20, @@ -51326,6 +53668,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][6] = 18, [0][1][RTW89_MKK][0][6] = -22, [0][1][RTW89_IC][1][6] = -40, + [0][1][RTW89_IC][2][6] = 32, [0][1][RTW89_KCC][1][6] = -14, [0][1][RTW89_KCC][0][6] = -14, [0][1][RTW89_ACMA][1][6] = 20, @@ -51335,6 +53678,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][6] = -18, [0][1][RTW89_UK][1][6] = 20, [0][1][RTW89_UK][0][6] = -18, + [0][1][RTW89_THAILAND][1][6] = 6, + [0][1][RTW89_THAILAND][0][6] = -40, [0][1][RTW89_FCC][1][8] = -40, [0][1][RTW89_FCC][2][8] = 32, [0][1][RTW89_ETSI][1][8] = 20, @@ -51342,6 +53687,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][8] = 18, [0][1][RTW89_MKK][0][8] = -22, [0][1][RTW89_IC][1][8] = -40, + [0][1][RTW89_IC][2][8] = 32, [0][1][RTW89_KCC][1][8] = -14, [0][1][RTW89_KCC][0][8] = -14, [0][1][RTW89_ACMA][1][8] = 20, @@ -51351,6 +53697,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][8] = -18, [0][1][RTW89_UK][1][8] = 20, [0][1][RTW89_UK][0][8] = -18, + [0][1][RTW89_THAILAND][1][8] = 6, + [0][1][RTW89_THAILAND][0][8] = -40, [0][1][RTW89_FCC][1][10] = -40, [0][1][RTW89_FCC][2][10] = 32, [0][1][RTW89_ETSI][1][10] = 20, @@ -51358,6 +53706,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][10] = 18, [0][1][RTW89_MKK][0][10] = -22, [0][1][RTW89_IC][1][10] = -40, + [0][1][RTW89_IC][2][10] = 32, [0][1][RTW89_KCC][1][10] = -14, [0][1][RTW89_KCC][0][10] = -14, [0][1][RTW89_ACMA][1][10] = 20, @@ -51367,6 +53716,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][10] = -18, [0][1][RTW89_UK][1][10] = 20, [0][1][RTW89_UK][0][10] = -18, + [0][1][RTW89_THAILAND][1][10] = 6, + [0][1][RTW89_THAILAND][0][10] = -40, [0][1][RTW89_FCC][1][12] = -40, 
[0][1][RTW89_FCC][2][12] = 32, [0][1][RTW89_ETSI][1][12] = 20, @@ -51374,6 +53725,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][12] = 18, [0][1][RTW89_MKK][0][12] = -22, [0][1][RTW89_IC][1][12] = -40, + [0][1][RTW89_IC][2][12] = 32, [0][1][RTW89_KCC][1][12] = -14, [0][1][RTW89_KCC][0][12] = -14, [0][1][RTW89_ACMA][1][12] = 20, @@ -51383,6 +53735,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][12] = -18, [0][1][RTW89_UK][1][12] = 20, [0][1][RTW89_UK][0][12] = -18, + [0][1][RTW89_THAILAND][1][12] = 6, + [0][1][RTW89_THAILAND][0][12] = -40, [0][1][RTW89_FCC][1][14] = -40, [0][1][RTW89_FCC][2][14] = 32, [0][1][RTW89_ETSI][1][14] = 20, @@ -51390,6 +53744,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][14] = 18, [0][1][RTW89_MKK][0][14] = -22, [0][1][RTW89_IC][1][14] = -40, + [0][1][RTW89_IC][2][14] = 32, [0][1][RTW89_KCC][1][14] = -14, [0][1][RTW89_KCC][0][14] = -14, [0][1][RTW89_ACMA][1][14] = 20, @@ -51399,6 +53754,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][14] = -18, [0][1][RTW89_UK][1][14] = 20, [0][1][RTW89_UK][0][14] = -18, + [0][1][RTW89_THAILAND][1][14] = 6, + [0][1][RTW89_THAILAND][0][14] = -40, [0][1][RTW89_FCC][1][15] = -40, [0][1][RTW89_FCC][2][15] = 32, [0][1][RTW89_ETSI][1][15] = 20, @@ -51406,6 +53763,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][15] = 18, [0][1][RTW89_MKK][0][15] = -22, [0][1][RTW89_IC][1][15] = -40, + [0][1][RTW89_IC][2][15] = 32, [0][1][RTW89_KCC][1][15] = -14, [0][1][RTW89_KCC][0][15] = -14, [0][1][RTW89_ACMA][1][15] = 20, @@ -51415,6 +53773,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][15] = -18, [0][1][RTW89_UK][1][15] = 20, [0][1][RTW89_UK][0][15] = -18, + [0][1][RTW89_THAILAND][1][15] = 6, + [0][1][RTW89_THAILAND][0][15] = -40, [0][1][RTW89_FCC][1][17] = -40, [0][1][RTW89_FCC][2][17] = 32, [0][1][RTW89_ETSI][1][17] = 20, @@ -51422,6 +53782,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][17] = 18, [0][1][RTW89_MKK][0][17] = -22, [0][1][RTW89_IC][1][17] = -40, + [0][1][RTW89_IC][2][17] = 32, [0][1][RTW89_KCC][1][17] = -14, [0][1][RTW89_KCC][0][17] = -14, [0][1][RTW89_ACMA][1][17] = 20, @@ -51431,6 +53792,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][17] = -18, [0][1][RTW89_UK][1][17] = 20, [0][1][RTW89_UK][0][17] = -18, + [0][1][RTW89_THAILAND][1][17] = 6, + [0][1][RTW89_THAILAND][0][17] = -40, [0][1][RTW89_FCC][1][19] = -40, [0][1][RTW89_FCC][2][19] = 32, [0][1][RTW89_ETSI][1][19] = 20, @@ -51438,6 +53801,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][19] = 18, [0][1][RTW89_MKK][0][19] = -22, [0][1][RTW89_IC][1][19] = -40, + [0][1][RTW89_IC][2][19] = 32, [0][1][RTW89_KCC][1][19] = -14, [0][1][RTW89_KCC][0][19] = -14, [0][1][RTW89_ACMA][1][19] = 20, @@ -51447,6 +53811,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][19] = -18, [0][1][RTW89_UK][1][19] = 20, [0][1][RTW89_UK][0][19] = -18, + [0][1][RTW89_THAILAND][1][19] = 6, + [0][1][RTW89_THAILAND][0][19] = -40, [0][1][RTW89_FCC][1][21] = -40, [0][1][RTW89_FCC][2][21] = 32, [0][1][RTW89_ETSI][1][21] = 20, @@ -51454,6 +53820,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][21] = 18, [0][1][RTW89_MKK][0][21] = 
-22, [0][1][RTW89_IC][1][21] = -40, + [0][1][RTW89_IC][2][21] = 32, [0][1][RTW89_KCC][1][21] = -14, [0][1][RTW89_KCC][0][21] = -14, [0][1][RTW89_ACMA][1][21] = 20, @@ -51463,6 +53830,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][21] = -18, [0][1][RTW89_UK][1][21] = 20, [0][1][RTW89_UK][0][21] = -18, + [0][1][RTW89_THAILAND][1][21] = 6, + [0][1][RTW89_THAILAND][0][21] = -40, [0][1][RTW89_FCC][1][23] = -40, [0][1][RTW89_FCC][2][23] = 32, [0][1][RTW89_ETSI][1][23] = 20, @@ -51470,6 +53839,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][23] = 18, [0][1][RTW89_MKK][0][23] = -22, [0][1][RTW89_IC][1][23] = -40, + [0][1][RTW89_IC][2][23] = 32, [0][1][RTW89_KCC][1][23] = -14, [0][1][RTW89_KCC][0][23] = -14, [0][1][RTW89_ACMA][1][23] = 20, @@ -51479,6 +53849,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][23] = -18, [0][1][RTW89_UK][1][23] = 20, [0][1][RTW89_UK][0][23] = -18, + [0][1][RTW89_THAILAND][1][23] = 6, + [0][1][RTW89_THAILAND][0][23] = -40, [0][1][RTW89_FCC][1][25] = -40, [0][1][RTW89_FCC][2][25] = 32, [0][1][RTW89_ETSI][1][25] = 20, @@ -51486,6 +53858,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][25] = -4, [0][1][RTW89_MKK][0][25] = -22, [0][1][RTW89_IC][1][25] = -40, + [0][1][RTW89_IC][2][25] = 32, [0][1][RTW89_KCC][1][25] = -14, [0][1][RTW89_KCC][0][25] = -14, [0][1][RTW89_ACMA][1][25] = 20, @@ -51495,6 +53868,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][25] = -18, [0][1][RTW89_UK][1][25] = 20, [0][1][RTW89_UK][0][25] = -18, + [0][1][RTW89_THAILAND][1][25] = 6, + [0][1][RTW89_THAILAND][0][25] = -40, [0][1][RTW89_FCC][1][27] = -40, [0][1][RTW89_FCC][2][27] = 32, [0][1][RTW89_ETSI][1][27] = 20, @@ -51502,6 +53877,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][27] = -4, [0][1][RTW89_MKK][0][27] = -22, [0][1][RTW89_IC][1][27] = -40, + [0][1][RTW89_IC][2][27] = 32, [0][1][RTW89_KCC][1][27] = -14, [0][1][RTW89_KCC][0][27] = -14, [0][1][RTW89_ACMA][1][27] = 20, @@ -51511,6 +53887,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][27] = -18, [0][1][RTW89_UK][1][27] = 20, [0][1][RTW89_UK][0][27] = -18, + [0][1][RTW89_THAILAND][1][27] = 6, + [0][1][RTW89_THAILAND][0][27] = -40, [0][1][RTW89_FCC][1][29] = -40, [0][1][RTW89_FCC][2][29] = 32, [0][1][RTW89_ETSI][1][29] = 20, @@ -51518,6 +53896,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][29] = -4, [0][1][RTW89_MKK][0][29] = -22, [0][1][RTW89_IC][1][29] = -40, + [0][1][RTW89_IC][2][29] = 32, [0][1][RTW89_KCC][1][29] = -14, [0][1][RTW89_KCC][0][29] = -14, [0][1][RTW89_ACMA][1][29] = 20, @@ -51527,6 +53906,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][29] = -18, [0][1][RTW89_UK][1][29] = 20, [0][1][RTW89_UK][0][29] = -18, + [0][1][RTW89_THAILAND][1][29] = 6, + [0][1][RTW89_THAILAND][0][29] = -40, [0][1][RTW89_FCC][1][30] = -40, [0][1][RTW89_FCC][2][30] = 32, [0][1][RTW89_ETSI][1][30] = 20, @@ -51534,6 +53915,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][30] = -4, [0][1][RTW89_MKK][0][30] = -22, [0][1][RTW89_IC][1][30] = -40, + [0][1][RTW89_IC][2][30] = 32, [0][1][RTW89_KCC][1][30] = -14, [0][1][RTW89_KCC][0][30] = -14, [0][1][RTW89_ACMA][1][30] = 20, @@ -51543,6 +53925,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][30] = -18, [0][1][RTW89_UK][1][30] = 20, [0][1][RTW89_UK][0][30] = -18, + [0][1][RTW89_THAILAND][1][30] = 6, + [0][1][RTW89_THAILAND][0][30] = -40, [0][1][RTW89_FCC][1][32] = -40, [0][1][RTW89_FCC][2][32] = 32, [0][1][RTW89_ETSI][1][32] = 20, @@ -51550,6 +53934,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][32] = -4, [0][1][RTW89_MKK][0][32] = -22, [0][1][RTW89_IC][1][32] = -40, + [0][1][RTW89_IC][2][32] = 32, [0][1][RTW89_KCC][1][32] = -14, [0][1][RTW89_KCC][0][32] = -14, [0][1][RTW89_ACMA][1][32] = 20, @@ -51559,6 +53944,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][32] = -18, [0][1][RTW89_UK][1][32] = 20, [0][1][RTW89_UK][0][32] = -18, + [0][1][RTW89_THAILAND][1][32] = 6, + [0][1][RTW89_THAILAND][0][32] = -40, [0][1][RTW89_FCC][1][34] = -40, [0][1][RTW89_FCC][2][34] = 32, [0][1][RTW89_ETSI][1][34] = 20, @@ -51566,6 +53953,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][34] = -4, [0][1][RTW89_MKK][0][34] = -22, [0][1][RTW89_IC][1][34] = -40, + [0][1][RTW89_IC][2][34] = 32, [0][1][RTW89_KCC][1][34] = -14, [0][1][RTW89_KCC][0][34] = -14, [0][1][RTW89_ACMA][1][34] = 20, @@ -51575,6 +53963,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][34] = -18, [0][1][RTW89_UK][1][34] = 20, [0][1][RTW89_UK][0][34] = -18, + [0][1][RTW89_THAILAND][1][34] = 6, + [0][1][RTW89_THAILAND][0][34] = -40, [0][1][RTW89_FCC][1][36] = -40, [0][1][RTW89_FCC][2][36] = 32, [0][1][RTW89_ETSI][1][36] = 20, @@ -51582,6 +53972,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][36] = -4, [0][1][RTW89_MKK][0][36] = -22, [0][1][RTW89_IC][1][36] = -40, + [0][1][RTW89_IC][2][36] = 32, [0][1][RTW89_KCC][1][36] = -14, [0][1][RTW89_KCC][0][36] = -14, [0][1][RTW89_ACMA][1][36] = 20, @@ -51591,6 +53982,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][36] = -18, [0][1][RTW89_UK][1][36] = 20, [0][1][RTW89_UK][0][36] = -18, + [0][1][RTW89_THAILAND][1][36] = 6, + [0][1][RTW89_THAILAND][0][36] = -40, [0][1][RTW89_FCC][1][38] = -40, [0][1][RTW89_FCC][2][38] = 32, [0][1][RTW89_ETSI][1][38] = 20, @@ -51598,6 +53991,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][38] = -4, [0][1][RTW89_MKK][0][38] = -22, [0][1][RTW89_IC][1][38] = -40, + [0][1][RTW89_IC][2][38] = 32, [0][1][RTW89_KCC][1][38] = -14, [0][1][RTW89_KCC][0][38] = -14, [0][1][RTW89_ACMA][1][38] = 20, @@ -51607,6 +54001,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][38] = -18, [0][1][RTW89_UK][1][38] = 20, [0][1][RTW89_UK][0][38] = -18, + [0][1][RTW89_THAILAND][1][38] = 6, + [0][1][RTW89_THAILAND][0][38] = -40, [0][1][RTW89_FCC][1][40] = -40, [0][1][RTW89_FCC][2][40] = 32, [0][1][RTW89_ETSI][1][40] = 20, @@ -51614,6 +54010,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][40] = -4, [0][1][RTW89_MKK][0][40] = -22, [0][1][RTW89_IC][1][40] = -40, + [0][1][RTW89_IC][2][40] = 32, [0][1][RTW89_KCC][1][40] = -14, [0][1][RTW89_KCC][0][40] = -14, [0][1][RTW89_ACMA][1][40] = 20, @@ -51623,6 +54020,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][40] = -18, [0][1][RTW89_UK][1][40] = 20, [0][1][RTW89_UK][0][40] = -18, + [0][1][RTW89_THAILAND][1][40] = 6, + 
[0][1][RTW89_THAILAND][0][40] = -40, [0][1][RTW89_FCC][1][42] = -40, [0][1][RTW89_FCC][2][42] = 32, [0][1][RTW89_ETSI][1][42] = 20, @@ -51630,6 +54029,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][42] = -4, [0][1][RTW89_MKK][0][42] = -22, [0][1][RTW89_IC][1][42] = -40, + [0][1][RTW89_IC][2][42] = 32, [0][1][RTW89_KCC][1][42] = -14, [0][1][RTW89_KCC][0][42] = -14, [0][1][RTW89_ACMA][1][42] = 20, @@ -51639,6 +54039,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][42] = -18, [0][1][RTW89_UK][1][42] = 20, [0][1][RTW89_UK][0][42] = -18, + [0][1][RTW89_THAILAND][1][42] = 6, + [0][1][RTW89_THAILAND][0][42] = -40, [0][1][RTW89_FCC][1][44] = -40, [0][1][RTW89_FCC][2][44] = 32, [0][1][RTW89_ETSI][1][44] = 20, @@ -51646,6 +54048,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][44] = -4, [0][1][RTW89_MKK][0][44] = -22, [0][1][RTW89_IC][1][44] = -40, + [0][1][RTW89_IC][2][44] = 32, [0][1][RTW89_KCC][1][44] = -14, [0][1][RTW89_KCC][0][44] = -14, [0][1][RTW89_ACMA][1][44] = 20, @@ -51655,6 +54058,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][44] = -18, [0][1][RTW89_UK][1][44] = 20, [0][1][RTW89_UK][0][44] = -18, + [0][1][RTW89_THAILAND][1][44] = 6, + [0][1][RTW89_THAILAND][0][44] = -40, [0][1][RTW89_FCC][1][45] = -40, [0][1][RTW89_FCC][2][45] = 127, [0][1][RTW89_ETSI][1][45] = 127, @@ -51662,6 +54067,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][45] = 127, [0][1][RTW89_MKK][0][45] = 127, [0][1][RTW89_IC][1][45] = -40, + [0][1][RTW89_IC][2][45] = 32, [0][1][RTW89_KCC][1][45] = -14, [0][1][RTW89_KCC][0][45] = 127, [0][1][RTW89_ACMA][1][45] = 127, @@ -51671,6 +54077,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][45] = 127, [0][1][RTW89_UK][1][45] = 127, [0][1][RTW89_UK][0][45] = 127, + [0][1][RTW89_THAILAND][1][45] = 127, + [0][1][RTW89_THAILAND][0][45] = 127, [0][1][RTW89_FCC][1][47] = -40, [0][1][RTW89_FCC][2][47] = 127, [0][1][RTW89_ETSI][1][47] = 127, @@ -51678,6 +54086,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][47] = 127, [0][1][RTW89_MKK][0][47] = 127, [0][1][RTW89_IC][1][47] = -40, + [0][1][RTW89_IC][2][47] = 32, [0][1][RTW89_KCC][1][47] = -14, [0][1][RTW89_KCC][0][47] = 127, [0][1][RTW89_ACMA][1][47] = 127, @@ -51687,6 +54096,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][47] = 127, [0][1][RTW89_UK][1][47] = 127, [0][1][RTW89_UK][0][47] = 127, + [0][1][RTW89_THAILAND][1][47] = 127, + [0][1][RTW89_THAILAND][0][47] = 127, [0][1][RTW89_FCC][1][49] = -40, [0][1][RTW89_FCC][2][49] = 127, [0][1][RTW89_ETSI][1][49] = 127, @@ -51694,6 +54105,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][49] = 127, [0][1][RTW89_MKK][0][49] = 127, [0][1][RTW89_IC][1][49] = -40, + [0][1][RTW89_IC][2][49] = 32, [0][1][RTW89_KCC][1][49] = -14, [0][1][RTW89_KCC][0][49] = 127, [0][1][RTW89_ACMA][1][49] = 127, @@ -51703,6 +54115,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][49] = 127, [0][1][RTW89_UK][1][49] = 127, [0][1][RTW89_UK][0][49] = 127, + [0][1][RTW89_THAILAND][1][49] = 127, + [0][1][RTW89_THAILAND][0][49] = 127, [0][1][RTW89_FCC][1][51] = -40, [0][1][RTW89_FCC][2][51] = 127, [0][1][RTW89_ETSI][1][51] = 127, @@ -51710,6 +54124,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][51] = 127, [0][1][RTW89_MKK][0][51] = 127, [0][1][RTW89_IC][1][51] = -40, + [0][1][RTW89_IC][2][51] = 32, [0][1][RTW89_KCC][1][51] = -14, [0][1][RTW89_KCC][0][51] = 127, [0][1][RTW89_ACMA][1][51] = 127, @@ -51719,6 +54134,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][51] = 127, [0][1][RTW89_UK][1][51] = 127, [0][1][RTW89_UK][0][51] = 127, + [0][1][RTW89_THAILAND][1][51] = 127, + [0][1][RTW89_THAILAND][0][51] = 127, [0][1][RTW89_FCC][1][53] = -40, [0][1][RTW89_FCC][2][53] = 127, [0][1][RTW89_ETSI][1][53] = 127, @@ -51726,6 +54143,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][53] = 127, [0][1][RTW89_MKK][0][53] = 127, [0][1][RTW89_IC][1][53] = -40, + [0][1][RTW89_IC][2][53] = 32, [0][1][RTW89_KCC][1][53] = -14, [0][1][RTW89_KCC][0][53] = 127, [0][1][RTW89_ACMA][1][53] = 127, @@ -51735,6 +54153,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][53] = 127, [0][1][RTW89_UK][1][53] = 127, [0][1][RTW89_UK][0][53] = 127, + [0][1][RTW89_THAILAND][1][53] = 127, + [0][1][RTW89_THAILAND][0][53] = 127, [0][1][RTW89_FCC][1][55] = -40, [0][1][RTW89_FCC][2][55] = 30, [0][1][RTW89_ETSI][1][55] = 127, @@ -51742,6 +54162,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][55] = 127, [0][1][RTW89_MKK][0][55] = 127, [0][1][RTW89_IC][1][55] = -40, + [0][1][RTW89_IC][2][55] = 30, [0][1][RTW89_KCC][1][55] = -14, [0][1][RTW89_KCC][0][55] = 127, [0][1][RTW89_ACMA][1][55] = 127, @@ -51751,6 +54172,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][55] = 127, [0][1][RTW89_UK][1][55] = 127, [0][1][RTW89_UK][0][55] = 127, + [0][1][RTW89_THAILAND][1][55] = 127, + [0][1][RTW89_THAILAND][0][55] = 127, [0][1][RTW89_FCC][1][57] = -40, [0][1][RTW89_FCC][2][57] = 30, [0][1][RTW89_ETSI][1][57] = 127, @@ -51758,6 +54181,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][57] = 127, [0][1][RTW89_MKK][0][57] = 127, [0][1][RTW89_IC][1][57] = -40, + [0][1][RTW89_IC][2][57] = 30, [0][1][RTW89_KCC][1][57] = -14, [0][1][RTW89_KCC][0][57] = 127, [0][1][RTW89_ACMA][1][57] = 127, @@ -51767,6 +54191,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][57] = 127, [0][1][RTW89_UK][1][57] = 127, [0][1][RTW89_UK][0][57] = 127, + [0][1][RTW89_THAILAND][1][57] = 127, + [0][1][RTW89_THAILAND][0][57] = 127, [0][1][RTW89_FCC][1][59] = -40, [0][1][RTW89_FCC][2][59] = 30, [0][1][RTW89_ETSI][1][59] = 127, @@ -51774,6 +54200,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][59] = 127, [0][1][RTW89_MKK][0][59] = 127, [0][1][RTW89_IC][1][59] = -40, + [0][1][RTW89_IC][2][59] = 30, [0][1][RTW89_KCC][1][59] = -14, [0][1][RTW89_KCC][0][59] = 127, [0][1][RTW89_ACMA][1][59] = 127, @@ -51783,6 +54210,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][59] = 127, [0][1][RTW89_UK][1][59] = 127, [0][1][RTW89_UK][0][59] = 127, + [0][1][RTW89_THAILAND][1][59] = 127, + [0][1][RTW89_THAILAND][0][59] = 127, [0][1][RTW89_FCC][1][60] = -40, [0][1][RTW89_FCC][2][60] = 30, [0][1][RTW89_ETSI][1][60] = 127, @@ -51790,6 +54219,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][60] = 127, [0][1][RTW89_MKK][0][60] = 127, [0][1][RTW89_IC][1][60] = -40, + [0][1][RTW89_IC][2][60] = 30, 
[0][1][RTW89_KCC][1][60] = -14, [0][1][RTW89_KCC][0][60] = 127, [0][1][RTW89_ACMA][1][60] = 127, @@ -51799,6 +54229,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][60] = 127, [0][1][RTW89_UK][1][60] = 127, [0][1][RTW89_UK][0][60] = 127, + [0][1][RTW89_THAILAND][1][60] = 127, + [0][1][RTW89_THAILAND][0][60] = 127, [0][1][RTW89_FCC][1][62] = -40, [0][1][RTW89_FCC][2][62] = 30, [0][1][RTW89_ETSI][1][62] = 127, @@ -51806,6 +54238,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][62] = 127, [0][1][RTW89_MKK][0][62] = 127, [0][1][RTW89_IC][1][62] = -40, + [0][1][RTW89_IC][2][62] = 30, [0][1][RTW89_KCC][1][62] = -14, [0][1][RTW89_KCC][0][62] = 127, [0][1][RTW89_ACMA][1][62] = 127, @@ -51815,6 +54248,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][62] = 127, [0][1][RTW89_UK][1][62] = 127, [0][1][RTW89_UK][0][62] = 127, + [0][1][RTW89_THAILAND][1][62] = 127, + [0][1][RTW89_THAILAND][0][62] = 127, [0][1][RTW89_FCC][1][64] = -40, [0][1][RTW89_FCC][2][64] = 30, [0][1][RTW89_ETSI][1][64] = 127, @@ -51822,6 +54257,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][64] = 127, [0][1][RTW89_MKK][0][64] = 127, [0][1][RTW89_IC][1][64] = -40, + [0][1][RTW89_IC][2][64] = 30, [0][1][RTW89_KCC][1][64] = -14, [0][1][RTW89_KCC][0][64] = 127, [0][1][RTW89_ACMA][1][64] = 127, @@ -51831,6 +54267,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][64] = 127, [0][1][RTW89_UK][1][64] = 127, [0][1][RTW89_UK][0][64] = 127, + [0][1][RTW89_THAILAND][1][64] = 127, + [0][1][RTW89_THAILAND][0][64] = 127, [0][1][RTW89_FCC][1][66] = -40, [0][1][RTW89_FCC][2][66] = 30, [0][1][RTW89_ETSI][1][66] = 127, @@ -51838,6 +54276,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][66] = 127, [0][1][RTW89_MKK][0][66] = 127, [0][1][RTW89_IC][1][66] = -40, + [0][1][RTW89_IC][2][66] = 30, [0][1][RTW89_KCC][1][66] = -14, [0][1][RTW89_KCC][0][66] = 127, [0][1][RTW89_ACMA][1][66] = 127, @@ -51847,6 +54286,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][66] = 127, [0][1][RTW89_UK][1][66] = 127, [0][1][RTW89_UK][0][66] = 127, + [0][1][RTW89_THAILAND][1][66] = 127, + [0][1][RTW89_THAILAND][0][66] = 127, [0][1][RTW89_FCC][1][68] = -40, [0][1][RTW89_FCC][2][68] = 30, [0][1][RTW89_ETSI][1][68] = 127, @@ -51854,6 +54295,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][68] = 127, [0][1][RTW89_MKK][0][68] = 127, [0][1][RTW89_IC][1][68] = -40, + [0][1][RTW89_IC][2][68] = 30, [0][1][RTW89_KCC][1][68] = -14, [0][1][RTW89_KCC][0][68] = 127, [0][1][RTW89_ACMA][1][68] = 127, @@ -51863,6 +54305,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][68] = 127, [0][1][RTW89_UK][1][68] = 127, [0][1][RTW89_UK][0][68] = 127, + [0][1][RTW89_THAILAND][1][68] = 127, + [0][1][RTW89_THAILAND][0][68] = 127, [0][1][RTW89_FCC][1][70] = -38, [0][1][RTW89_FCC][2][70] = 30, [0][1][RTW89_ETSI][1][70] = 127, @@ -51870,6 +54314,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][70] = 127, [0][1][RTW89_MKK][0][70] = 127, [0][1][RTW89_IC][1][70] = -38, + [0][1][RTW89_IC][2][70] = 30, [0][1][RTW89_KCC][1][70] = -14, [0][1][RTW89_KCC][0][70] = 127, [0][1][RTW89_ACMA][1][70] = 127, @@ -51879,6 +54324,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][70] = 127, [0][1][RTW89_UK][1][70] = 127, [0][1][RTW89_UK][0][70] = 127, + [0][1][RTW89_THAILAND][1][70] = 127, + [0][1][RTW89_THAILAND][0][70] = 127, [0][1][RTW89_FCC][1][72] = -38, [0][1][RTW89_FCC][2][72] = 30, [0][1][RTW89_ETSI][1][72] = 127, @@ -51886,6 +54333,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][72] = 127, [0][1][RTW89_MKK][0][72] = 127, [0][1][RTW89_IC][1][72] = -38, + [0][1][RTW89_IC][2][72] = 30, [0][1][RTW89_KCC][1][72] = -14, [0][1][RTW89_KCC][0][72] = 127, [0][1][RTW89_ACMA][1][72] = 127, @@ -51895,6 +54343,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][72] = 127, [0][1][RTW89_UK][1][72] = 127, [0][1][RTW89_UK][0][72] = 127, + [0][1][RTW89_THAILAND][1][72] = 127, + [0][1][RTW89_THAILAND][0][72] = 127, [0][1][RTW89_FCC][1][74] = -38, [0][1][RTW89_FCC][2][74] = 30, [0][1][RTW89_ETSI][1][74] = 127, @@ -51902,6 +54352,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][74] = 127, [0][1][RTW89_MKK][0][74] = 127, [0][1][RTW89_IC][1][74] = -38, + [0][1][RTW89_IC][2][74] = 30, [0][1][RTW89_KCC][1][74] = -14, [0][1][RTW89_KCC][0][74] = 127, [0][1][RTW89_ACMA][1][74] = 127, @@ -51911,6 +54362,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][74] = 127, [0][1][RTW89_UK][1][74] = 127, [0][1][RTW89_UK][0][74] = 127, + [0][1][RTW89_THAILAND][1][74] = 127, + [0][1][RTW89_THAILAND][0][74] = 127, [0][1][RTW89_FCC][1][75] = -38, [0][1][RTW89_FCC][2][75] = 30, [0][1][RTW89_ETSI][1][75] = 127, @@ -51918,6 +54371,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][75] = 127, [0][1][RTW89_MKK][0][75] = 127, [0][1][RTW89_IC][1][75] = -38, + [0][1][RTW89_IC][2][75] = 30, [0][1][RTW89_KCC][1][75] = -14, [0][1][RTW89_KCC][0][75] = 127, [0][1][RTW89_ACMA][1][75] = 127, @@ -51927,6 +54381,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][75] = 127, [0][1][RTW89_UK][1][75] = 127, [0][1][RTW89_UK][0][75] = 127, + [0][1][RTW89_THAILAND][1][75] = 127, + [0][1][RTW89_THAILAND][0][75] = 127, [0][1][RTW89_FCC][1][77] = -38, [0][1][RTW89_FCC][2][77] = 30, [0][1][RTW89_ETSI][1][77] = 127, @@ -51934,6 +54390,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][77] = 127, [0][1][RTW89_MKK][0][77] = 127, [0][1][RTW89_IC][1][77] = -38, + [0][1][RTW89_IC][2][77] = 30, [0][1][RTW89_KCC][1][77] = -14, [0][1][RTW89_KCC][0][77] = 127, [0][1][RTW89_ACMA][1][77] = 127, @@ -51943,6 +54400,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][77] = 127, [0][1][RTW89_UK][1][77] = 127, [0][1][RTW89_UK][0][77] = 127, + [0][1][RTW89_THAILAND][1][77] = 127, + [0][1][RTW89_THAILAND][0][77] = 127, [0][1][RTW89_FCC][1][79] = -38, [0][1][RTW89_FCC][2][79] = 30, [0][1][RTW89_ETSI][1][79] = 127, @@ -51950,6 +54409,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][79] = 127, [0][1][RTW89_MKK][0][79] = 127, [0][1][RTW89_IC][1][79] = -38, + [0][1][RTW89_IC][2][79] = 30, [0][1][RTW89_KCC][1][79] = -14, [0][1][RTW89_KCC][0][79] = 127, [0][1][RTW89_ACMA][1][79] = 127, @@ -51959,6 +54419,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][79] = 127, [0][1][RTW89_UK][1][79] = 127, [0][1][RTW89_UK][0][79] = 127, + [0][1][RTW89_THAILAND][1][79] 
= 127, + [0][1][RTW89_THAILAND][0][79] = 127, [0][1][RTW89_FCC][1][81] = -38, [0][1][RTW89_FCC][2][81] = 30, [0][1][RTW89_ETSI][1][81] = 127, @@ -51966,6 +54428,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][81] = 127, [0][1][RTW89_MKK][0][81] = 127, [0][1][RTW89_IC][1][81] = -38, + [0][1][RTW89_IC][2][81] = 30, [0][1][RTW89_KCC][1][81] = -14, [0][1][RTW89_KCC][0][81] = 127, [0][1][RTW89_ACMA][1][81] = 127, @@ -51975,6 +54438,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][81] = 127, [0][1][RTW89_UK][1][81] = 127, [0][1][RTW89_UK][0][81] = 127, + [0][1][RTW89_THAILAND][1][81] = 127, + [0][1][RTW89_THAILAND][0][81] = 127, [0][1][RTW89_FCC][1][83] = -38, [0][1][RTW89_FCC][2][83] = 30, [0][1][RTW89_ETSI][1][83] = 127, @@ -51982,6 +54447,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][83] = 127, [0][1][RTW89_MKK][0][83] = 127, [0][1][RTW89_IC][1][83] = -38, + [0][1][RTW89_IC][2][83] = 30, [0][1][RTW89_KCC][1][83] = -14, [0][1][RTW89_KCC][0][83] = 127, [0][1][RTW89_ACMA][1][83] = 127, @@ -51991,6 +54457,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][83] = 127, [0][1][RTW89_UK][1][83] = 127, [0][1][RTW89_UK][0][83] = 127, + [0][1][RTW89_THAILAND][1][83] = 127, + [0][1][RTW89_THAILAND][0][83] = 127, [0][1][RTW89_FCC][1][85] = -38, [0][1][RTW89_FCC][2][85] = 30, [0][1][RTW89_ETSI][1][85] = 127, @@ -51998,6 +54466,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][85] = 127, [0][1][RTW89_MKK][0][85] = 127, [0][1][RTW89_IC][1][85] = -38, + [0][1][RTW89_IC][2][85] = 30, [0][1][RTW89_KCC][1][85] = -14, [0][1][RTW89_KCC][0][85] = 127, [0][1][RTW89_ACMA][1][85] = 127, @@ -52007,6 +54476,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][85] = 127, [0][1][RTW89_UK][1][85] = 127, [0][1][RTW89_UK][0][85] = 127, + [0][1][RTW89_THAILAND][1][85] = 127, + [0][1][RTW89_THAILAND][0][85] = 127, [0][1][RTW89_FCC][1][87] = -40, [0][1][RTW89_FCC][2][87] = 127, [0][1][RTW89_ETSI][1][87] = 127, @@ -52014,6 +54485,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][87] = 127, [0][1][RTW89_MKK][0][87] = 127, [0][1][RTW89_IC][1][87] = -40, + [0][1][RTW89_IC][2][87] = 127, [0][1][RTW89_KCC][1][87] = -14, [0][1][RTW89_KCC][0][87] = 127, [0][1][RTW89_ACMA][1][87] = 127, @@ -52023,6 +54495,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][87] = 127, [0][1][RTW89_UK][1][87] = 127, [0][1][RTW89_UK][0][87] = 127, + [0][1][RTW89_THAILAND][1][87] = 127, + [0][1][RTW89_THAILAND][0][87] = 127, [0][1][RTW89_FCC][1][89] = -38, [0][1][RTW89_FCC][2][89] = 127, [0][1][RTW89_ETSI][1][89] = 127, @@ -52030,6 +54504,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][89] = 127, [0][1][RTW89_MKK][0][89] = 127, [0][1][RTW89_IC][1][89] = -38, + [0][1][RTW89_IC][2][89] = 127, [0][1][RTW89_KCC][1][89] = -14, [0][1][RTW89_KCC][0][89] = 127, [0][1][RTW89_ACMA][1][89] = 127, @@ -52039,6 +54514,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][89] = 127, [0][1][RTW89_UK][1][89] = 127, [0][1][RTW89_UK][0][89] = 127, + [0][1][RTW89_THAILAND][1][89] = 127, + [0][1][RTW89_THAILAND][0][89] = 127, [0][1][RTW89_FCC][1][90] = -38, [0][1][RTW89_FCC][2][90] = 127, [0][1][RTW89_ETSI][1][90] = 127, @@ -52046,6 +54523,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][90] = 127, [0][1][RTW89_MKK][0][90] = 127, [0][1][RTW89_IC][1][90] = -38, + [0][1][RTW89_IC][2][90] = 127, [0][1][RTW89_KCC][1][90] = -14, [0][1][RTW89_KCC][0][90] = 127, [0][1][RTW89_ACMA][1][90] = 127, @@ -52055,6 +54533,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][90] = 127, [0][1][RTW89_UK][1][90] = 127, [0][1][RTW89_UK][0][90] = 127, + [0][1][RTW89_THAILAND][1][90] = 127, + [0][1][RTW89_THAILAND][0][90] = 127, [0][1][RTW89_FCC][1][92] = -38, [0][1][RTW89_FCC][2][92] = 127, [0][1][RTW89_ETSI][1][92] = 127, @@ -52062,6 +54542,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][92] = 127, [0][1][RTW89_MKK][0][92] = 127, [0][1][RTW89_IC][1][92] = -38, + [0][1][RTW89_IC][2][92] = 127, [0][1][RTW89_KCC][1][92] = -14, [0][1][RTW89_KCC][0][92] = 127, [0][1][RTW89_ACMA][1][92] = 127, @@ -52071,6 +54552,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][92] = 127, [0][1][RTW89_UK][1][92] = 127, [0][1][RTW89_UK][0][92] = 127, + [0][1][RTW89_THAILAND][1][92] = 127, + [0][1][RTW89_THAILAND][0][92] = 127, [0][1][RTW89_FCC][1][94] = -38, [0][1][RTW89_FCC][2][94] = 127, [0][1][RTW89_ETSI][1][94] = 127, @@ -52078,6 +54561,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][94] = 127, [0][1][RTW89_MKK][0][94] = 127, [0][1][RTW89_IC][1][94] = -38, + [0][1][RTW89_IC][2][94] = 127, [0][1][RTW89_KCC][1][94] = -14, [0][1][RTW89_KCC][0][94] = 127, [0][1][RTW89_ACMA][1][94] = 127, @@ -52087,6 +54571,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][94] = 127, [0][1][RTW89_UK][1][94] = 127, [0][1][RTW89_UK][0][94] = 127, + [0][1][RTW89_THAILAND][1][94] = 127, + [0][1][RTW89_THAILAND][0][94] = 127, [0][1][RTW89_FCC][1][96] = -38, [0][1][RTW89_FCC][2][96] = 127, [0][1][RTW89_ETSI][1][96] = 127, @@ -52094,6 +54580,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][96] = 127, [0][1][RTW89_MKK][0][96] = 127, [0][1][RTW89_IC][1][96] = -38, + [0][1][RTW89_IC][2][96] = 127, [0][1][RTW89_KCC][1][96] = -14, [0][1][RTW89_KCC][0][96] = 127, [0][1][RTW89_ACMA][1][96] = 127, @@ -52103,6 +54590,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][96] = 127, [0][1][RTW89_UK][1][96] = 127, [0][1][RTW89_UK][0][96] = 127, + [0][1][RTW89_THAILAND][1][96] = 127, + [0][1][RTW89_THAILAND][0][96] = 127, [0][1][RTW89_FCC][1][98] = -38, [0][1][RTW89_FCC][2][98] = 127, [0][1][RTW89_ETSI][1][98] = 127, @@ -52110,6 +54599,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][98] = 127, [0][1][RTW89_MKK][0][98] = 127, [0][1][RTW89_IC][1][98] = -38, + [0][1][RTW89_IC][2][98] = 127, [0][1][RTW89_KCC][1][98] = -14, [0][1][RTW89_KCC][0][98] = 127, [0][1][RTW89_ACMA][1][98] = 127, @@ -52119,6 +54609,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][98] = 127, [0][1][RTW89_UK][1][98] = 127, [0][1][RTW89_UK][0][98] = 127, + [0][1][RTW89_THAILAND][1][98] = 127, + [0][1][RTW89_THAILAND][0][98] = 127, [0][1][RTW89_FCC][1][100] = -38, [0][1][RTW89_FCC][2][100] = 127, [0][1][RTW89_ETSI][1][100] = 127, @@ -52126,6 +54618,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][100] = 127, [0][1][RTW89_MKK][0][100] = 127, [0][1][RTW89_IC][1][100] = -38, + 
[0][1][RTW89_IC][2][100] = 127, [0][1][RTW89_KCC][1][100] = -14, [0][1][RTW89_KCC][0][100] = 127, [0][1][RTW89_ACMA][1][100] = 127, @@ -52135,6 +54628,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][100] = 127, [0][1][RTW89_UK][1][100] = 127, [0][1][RTW89_UK][0][100] = 127, + [0][1][RTW89_THAILAND][1][100] = 127, + [0][1][RTW89_THAILAND][0][100] = 127, [0][1][RTW89_FCC][1][102] = -38, [0][1][RTW89_FCC][2][102] = 127, [0][1][RTW89_ETSI][1][102] = 127, @@ -52142,6 +54637,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][102] = 127, [0][1][RTW89_MKK][0][102] = 127, [0][1][RTW89_IC][1][102] = -38, + [0][1][RTW89_IC][2][102] = 127, [0][1][RTW89_KCC][1][102] = -14, [0][1][RTW89_KCC][0][102] = 127, [0][1][RTW89_ACMA][1][102] = 127, @@ -52151,6 +54647,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][102] = 127, [0][1][RTW89_UK][1][102] = 127, [0][1][RTW89_UK][0][102] = 127, + [0][1][RTW89_THAILAND][1][102] = 127, + [0][1][RTW89_THAILAND][0][102] = 127, [0][1][RTW89_FCC][1][104] = -38, [0][1][RTW89_FCC][2][104] = 127, [0][1][RTW89_ETSI][1][104] = 127, @@ -52158,6 +54656,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][104] = 127, [0][1][RTW89_MKK][0][104] = 127, [0][1][RTW89_IC][1][104] = -38, + [0][1][RTW89_IC][2][104] = 127, [0][1][RTW89_KCC][1][104] = -14, [0][1][RTW89_KCC][0][104] = 127, [0][1][RTW89_ACMA][1][104] = 127, @@ -52167,6 +54666,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][104] = 127, [0][1][RTW89_UK][1][104] = 127, [0][1][RTW89_UK][0][104] = 127, + [0][1][RTW89_THAILAND][1][104] = 127, + [0][1][RTW89_THAILAND][0][104] = 127, [0][1][RTW89_FCC][1][105] = -38, [0][1][RTW89_FCC][2][105] = 127, [0][1][RTW89_ETSI][1][105] = 127, @@ -52174,6 +54675,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][105] = 127, [0][1][RTW89_MKK][0][105] = 127, [0][1][RTW89_IC][1][105] = -38, + [0][1][RTW89_IC][2][105] = 127, [0][1][RTW89_KCC][1][105] = -14, [0][1][RTW89_KCC][0][105] = 127, [0][1][RTW89_ACMA][1][105] = 127, @@ -52183,6 +54685,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][105] = 127, [0][1][RTW89_UK][1][105] = 127, [0][1][RTW89_UK][0][105] = 127, + [0][1][RTW89_THAILAND][1][105] = 127, + [0][1][RTW89_THAILAND][0][105] = 127, [0][1][RTW89_FCC][1][107] = -34, [0][1][RTW89_FCC][2][107] = 127, [0][1][RTW89_ETSI][1][107] = 127, @@ -52190,6 +54694,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][107] = 127, [0][1][RTW89_MKK][0][107] = 127, [0][1][RTW89_IC][1][107] = -34, + [0][1][RTW89_IC][2][107] = 127, [0][1][RTW89_KCC][1][107] = -14, [0][1][RTW89_KCC][0][107] = 127, [0][1][RTW89_ACMA][1][107] = 127, @@ -52199,6 +54704,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][107] = 127, [0][1][RTW89_UK][1][107] = 127, [0][1][RTW89_UK][0][107] = 127, + [0][1][RTW89_THAILAND][1][107] = 127, + [0][1][RTW89_THAILAND][0][107] = 127, [0][1][RTW89_FCC][1][109] = -34, [0][1][RTW89_FCC][2][109] = 127, [0][1][RTW89_ETSI][1][109] = 127, @@ -52206,6 +54713,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][109] = 127, [0][1][RTW89_MKK][0][109] = 127, [0][1][RTW89_IC][1][109] = -34, + [0][1][RTW89_IC][2][109] = 127, [0][1][RTW89_KCC][1][109] = 127, [0][1][RTW89_KCC][0][109] = 127, 
[0][1][RTW89_ACMA][1][109] = 127, @@ -52215,6 +54723,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][109] = 127, [0][1][RTW89_UK][1][109] = 127, [0][1][RTW89_UK][0][109] = 127, + [0][1][RTW89_THAILAND][1][109] = 127, + [0][1][RTW89_THAILAND][0][109] = 127, [0][1][RTW89_FCC][1][111] = 127, [0][1][RTW89_FCC][2][111] = 127, [0][1][RTW89_ETSI][1][111] = 127, @@ -52222,6 +54732,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][111] = 127, [0][1][RTW89_MKK][0][111] = 127, [0][1][RTW89_IC][1][111] = 127, + [0][1][RTW89_IC][2][111] = 127, [0][1][RTW89_KCC][1][111] = 127, [0][1][RTW89_KCC][0][111] = 127, [0][1][RTW89_ACMA][1][111] = 127, @@ -52231,6 +54742,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][111] = 127, [0][1][RTW89_UK][1][111] = 127, [0][1][RTW89_UK][0][111] = 127, + [0][1][RTW89_THAILAND][1][111] = 127, + [0][1][RTW89_THAILAND][0][111] = 127, [0][1][RTW89_FCC][1][113] = 127, [0][1][RTW89_FCC][2][113] = 127, [0][1][RTW89_ETSI][1][113] = 127, @@ -52238,6 +54751,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][113] = 127, [0][1][RTW89_MKK][0][113] = 127, [0][1][RTW89_IC][1][113] = 127, + [0][1][RTW89_IC][2][113] = 127, [0][1][RTW89_KCC][1][113] = 127, [0][1][RTW89_KCC][0][113] = 127, [0][1][RTW89_ACMA][1][113] = 127, @@ -52247,6 +54761,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][113] = 127, [0][1][RTW89_UK][1][113] = 127, [0][1][RTW89_UK][0][113] = 127, + [0][1][RTW89_THAILAND][1][113] = 127, + [0][1][RTW89_THAILAND][0][113] = 127, [0][1][RTW89_FCC][1][115] = 127, [0][1][RTW89_FCC][2][115] = 127, [0][1][RTW89_ETSI][1][115] = 127, @@ -52254,6 +54770,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][115] = 127, [0][1][RTW89_MKK][0][115] = 127, [0][1][RTW89_IC][1][115] = 127, + [0][1][RTW89_IC][2][115] = 127, [0][1][RTW89_KCC][1][115] = 127, [0][1][RTW89_KCC][0][115] = 127, [0][1][RTW89_ACMA][1][115] = 127, @@ -52263,6 +54780,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][115] = 127, [0][1][RTW89_UK][1][115] = 127, [0][1][RTW89_UK][0][115] = 127, + [0][1][RTW89_THAILAND][1][115] = 127, + [0][1][RTW89_THAILAND][0][115] = 127, [0][1][RTW89_FCC][1][117] = 127, [0][1][RTW89_FCC][2][117] = 127, [0][1][RTW89_ETSI][1][117] = 127, @@ -52270,6 +54789,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][117] = 127, [0][1][RTW89_MKK][0][117] = 127, [0][1][RTW89_IC][1][117] = 127, + [0][1][RTW89_IC][2][117] = 127, [0][1][RTW89_KCC][1][117] = 127, [0][1][RTW89_KCC][0][117] = 127, [0][1][RTW89_ACMA][1][117] = 127, @@ -52279,6 +54799,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][117] = 127, [0][1][RTW89_UK][1][117] = 127, [0][1][RTW89_UK][0][117] = 127, + [0][1][RTW89_THAILAND][1][117] = 127, + [0][1][RTW89_THAILAND][0][117] = 127, [0][1][RTW89_FCC][1][119] = 127, [0][1][RTW89_FCC][2][119] = 127, [0][1][RTW89_ETSI][1][119] = 127, @@ -52286,6 +54808,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][119] = 127, [0][1][RTW89_MKK][0][119] = 127, [0][1][RTW89_IC][1][119] = 127, + [0][1][RTW89_IC][2][119] = 127, [0][1][RTW89_KCC][1][119] = 127, [0][1][RTW89_KCC][0][119] = 127, [0][1][RTW89_ACMA][1][119] = 127, @@ -52295,6 +54818,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][119] = 127, [0][1][RTW89_UK][1][119] = 127, [0][1][RTW89_UK][0][119] = 127, + [0][1][RTW89_THAILAND][1][119] = 127, + [0][1][RTW89_THAILAND][0][119] = 127, [1][0][RTW89_FCC][1][0] = -4, [1][0][RTW89_FCC][2][0] = 52, [1][0][RTW89_ETSI][1][0] = 46, @@ -52302,6 +54827,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][0] = 42, [1][0][RTW89_MKK][0][0] = 2, [1][0][RTW89_IC][1][0] = -4, + [1][0][RTW89_IC][2][0] = 52, [1][0][RTW89_KCC][1][0] = -2, [1][0][RTW89_KCC][0][0] = -2, [1][0][RTW89_ACMA][1][0] = 46, @@ -52311,6 +54837,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][0] = 6, [1][0][RTW89_UK][1][0] = 46, [1][0][RTW89_UK][0][0] = 6, + [1][0][RTW89_THAILAND][1][0] = 42, + [1][0][RTW89_THAILAND][0][0] = -4, [1][0][RTW89_FCC][1][2] = -4, [1][0][RTW89_FCC][2][2] = 52, [1][0][RTW89_ETSI][1][2] = 46, @@ -52318,6 +54846,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][2] = 42, [1][0][RTW89_MKK][0][2] = 2, [1][0][RTW89_IC][1][2] = -4, + [1][0][RTW89_IC][2][2] = 52, [1][0][RTW89_KCC][1][2] = -2, [1][0][RTW89_KCC][0][2] = -2, [1][0][RTW89_ACMA][1][2] = 46, @@ -52327,6 +54856,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][2] = 6, [1][0][RTW89_UK][1][2] = 46, [1][0][RTW89_UK][0][2] = 6, + [1][0][RTW89_THAILAND][1][2] = 42, + [1][0][RTW89_THAILAND][0][2] = -4, [1][0][RTW89_FCC][1][4] = -4, [1][0][RTW89_FCC][2][4] = 52, [1][0][RTW89_ETSI][1][4] = 46, @@ -52334,6 +54865,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][4] = 42, [1][0][RTW89_MKK][0][4] = 2, [1][0][RTW89_IC][1][4] = -4, + [1][0][RTW89_IC][2][4] = 52, [1][0][RTW89_KCC][1][4] = -2, [1][0][RTW89_KCC][0][4] = -2, [1][0][RTW89_ACMA][1][4] = 46, @@ -52343,6 +54875,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][4] = 6, [1][0][RTW89_UK][1][4] = 46, [1][0][RTW89_UK][0][4] = 6, + [1][0][RTW89_THAILAND][1][4] = 42, + [1][0][RTW89_THAILAND][0][4] = -4, [1][0][RTW89_FCC][1][6] = -4, [1][0][RTW89_FCC][2][6] = 52, [1][0][RTW89_ETSI][1][6] = 46, @@ -52350,6 +54884,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][6] = 42, [1][0][RTW89_MKK][0][6] = 2, [1][0][RTW89_IC][1][6] = -4, + [1][0][RTW89_IC][2][6] = 52, [1][0][RTW89_KCC][1][6] = -2, [1][0][RTW89_KCC][0][6] = -2, [1][0][RTW89_ACMA][1][6] = 46, @@ -52359,6 +54894,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][6] = 6, [1][0][RTW89_UK][1][6] = 46, [1][0][RTW89_UK][0][6] = 6, + [1][0][RTW89_THAILAND][1][6] = 42, + [1][0][RTW89_THAILAND][0][6] = -4, [1][0][RTW89_FCC][1][8] = -4, [1][0][RTW89_FCC][2][8] = 52, [1][0][RTW89_ETSI][1][8] = 46, @@ -52366,6 +54903,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][8] = 42, [1][0][RTW89_MKK][0][8] = 2, [1][0][RTW89_IC][1][8] = -4, + [1][0][RTW89_IC][2][8] = 52, [1][0][RTW89_KCC][1][8] = -2, [1][0][RTW89_KCC][0][8] = -2, [1][0][RTW89_ACMA][1][8] = 46, @@ -52375,6 +54913,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][8] = 6, [1][0][RTW89_UK][1][8] = 46, [1][0][RTW89_UK][0][8] = 6, + [1][0][RTW89_THAILAND][1][8] = 42, + [1][0][RTW89_THAILAND][0][8] = -4, [1][0][RTW89_FCC][1][10] = -4, [1][0][RTW89_FCC][2][10] = 52, [1][0][RTW89_ETSI][1][10] = 46, @@ 
-52382,6 +54922,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][10] = 42, [1][0][RTW89_MKK][0][10] = 2, [1][0][RTW89_IC][1][10] = -4, + [1][0][RTW89_IC][2][10] = 52, [1][0][RTW89_KCC][1][10] = -2, [1][0][RTW89_KCC][0][10] = -2, [1][0][RTW89_ACMA][1][10] = 46, @@ -52391,6 +54932,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][10] = 6, [1][0][RTW89_UK][1][10] = 46, [1][0][RTW89_UK][0][10] = 6, + [1][0][RTW89_THAILAND][1][10] = 42, + [1][0][RTW89_THAILAND][0][10] = -4, [1][0][RTW89_FCC][1][12] = -4, [1][0][RTW89_FCC][2][12] = 52, [1][0][RTW89_ETSI][1][12] = 46, @@ -52398,6 +54941,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][12] = 42, [1][0][RTW89_MKK][0][12] = 2, [1][0][RTW89_IC][1][12] = -4, + [1][0][RTW89_IC][2][12] = 52, [1][0][RTW89_KCC][1][12] = -2, [1][0][RTW89_KCC][0][12] = -2, [1][0][RTW89_ACMA][1][12] = 46, @@ -52407,6 +54951,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][12] = 6, [1][0][RTW89_UK][1][12] = 46, [1][0][RTW89_UK][0][12] = 6, + [1][0][RTW89_THAILAND][1][12] = 42, + [1][0][RTW89_THAILAND][0][12] = -4, [1][0][RTW89_FCC][1][14] = -4, [1][0][RTW89_FCC][2][14] = 52, [1][0][RTW89_ETSI][1][14] = 46, @@ -52414,6 +54960,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][14] = 42, [1][0][RTW89_MKK][0][14] = 2, [1][0][RTW89_IC][1][14] = -4, + [1][0][RTW89_IC][2][14] = 52, [1][0][RTW89_KCC][1][14] = -2, [1][0][RTW89_KCC][0][14] = -2, [1][0][RTW89_ACMA][1][14] = 46, @@ -52423,6 +54970,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][14] = 6, [1][0][RTW89_UK][1][14] = 46, [1][0][RTW89_UK][0][14] = 6, + [1][0][RTW89_THAILAND][1][14] = 42, + [1][0][RTW89_THAILAND][0][14] = -4, [1][0][RTW89_FCC][1][15] = -4, [1][0][RTW89_FCC][2][15] = 52, [1][0][RTW89_ETSI][1][15] = 46, @@ -52430,6 +54979,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][15] = 42, [1][0][RTW89_MKK][0][15] = 2, [1][0][RTW89_IC][1][15] = -4, + [1][0][RTW89_IC][2][15] = 52, [1][0][RTW89_KCC][1][15] = -2, [1][0][RTW89_KCC][0][15] = -2, [1][0][RTW89_ACMA][1][15] = 46, @@ -52439,6 +54989,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][15] = 6, [1][0][RTW89_UK][1][15] = 46, [1][0][RTW89_UK][0][15] = 6, + [1][0][RTW89_THAILAND][1][15] = 42, + [1][0][RTW89_THAILAND][0][15] = -4, [1][0][RTW89_FCC][1][17] = -4, [1][0][RTW89_FCC][2][17] = 52, [1][0][RTW89_ETSI][1][17] = 46, @@ -52446,6 +54998,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][17] = 42, [1][0][RTW89_MKK][0][17] = 2, [1][0][RTW89_IC][1][17] = -4, + [1][0][RTW89_IC][2][17] = 52, [1][0][RTW89_KCC][1][17] = -2, [1][0][RTW89_KCC][0][17] = -2, [1][0][RTW89_ACMA][1][17] = 46, @@ -52455,6 +55008,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][17] = 6, [1][0][RTW89_UK][1][17] = 46, [1][0][RTW89_UK][0][17] = 6, + [1][0][RTW89_THAILAND][1][17] = 42, + [1][0][RTW89_THAILAND][0][17] = -4, [1][0][RTW89_FCC][1][19] = -4, [1][0][RTW89_FCC][2][19] = 52, [1][0][RTW89_ETSI][1][19] = 46, @@ -52462,6 +55017,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][19] = 42, [1][0][RTW89_MKK][0][19] = 2, [1][0][RTW89_IC][1][19] = -4, + [1][0][RTW89_IC][2][19] = 52, [1][0][RTW89_KCC][1][19] = -2, 
[1][0][RTW89_KCC][0][19] = -2, [1][0][RTW89_ACMA][1][19] = 46, @@ -52471,6 +55027,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][19] = 6, [1][0][RTW89_UK][1][19] = 46, [1][0][RTW89_UK][0][19] = 6, + [1][0][RTW89_THAILAND][1][19] = 42, + [1][0][RTW89_THAILAND][0][19] = -4, [1][0][RTW89_FCC][1][21] = -4, [1][0][RTW89_FCC][2][21] = 52, [1][0][RTW89_ETSI][1][21] = 46, @@ -52478,6 +55036,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][21] = 42, [1][0][RTW89_MKK][0][21] = 2, [1][0][RTW89_IC][1][21] = -4, + [1][0][RTW89_IC][2][21] = 52, [1][0][RTW89_KCC][1][21] = -2, [1][0][RTW89_KCC][0][21] = -2, [1][0][RTW89_ACMA][1][21] = 46, @@ -52487,6 +55046,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][21] = 6, [1][0][RTW89_UK][1][21] = 46, [1][0][RTW89_UK][0][21] = 6, + [1][0][RTW89_THAILAND][1][21] = 42, + [1][0][RTW89_THAILAND][0][21] = -4, [1][0][RTW89_FCC][1][23] = -4, [1][0][RTW89_FCC][2][23] = 66, [1][0][RTW89_ETSI][1][23] = 46, @@ -52494,6 +55055,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][23] = 42, [1][0][RTW89_MKK][0][23] = 2, [1][0][RTW89_IC][1][23] = -4, + [1][0][RTW89_IC][2][23] = 66, [1][0][RTW89_KCC][1][23] = -2, [1][0][RTW89_KCC][0][23] = -2, [1][0][RTW89_ACMA][1][23] = 46, @@ -52503,6 +55065,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][23] = 6, [1][0][RTW89_UK][1][23] = 46, [1][0][RTW89_UK][0][23] = 6, + [1][0][RTW89_THAILAND][1][23] = 42, + [1][0][RTW89_THAILAND][0][23] = -4, [1][0][RTW89_FCC][1][25] = -4, [1][0][RTW89_FCC][2][25] = 66, [1][0][RTW89_ETSI][1][25] = 46, @@ -52510,6 +55074,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][25] = 42, [1][0][RTW89_MKK][0][25] = 2, [1][0][RTW89_IC][1][25] = -4, + [1][0][RTW89_IC][2][25] = 66, [1][0][RTW89_KCC][1][25] = -2, [1][0][RTW89_KCC][0][25] = -2, [1][0][RTW89_ACMA][1][25] = 46, @@ -52519,6 +55084,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][25] = 6, [1][0][RTW89_UK][1][25] = 46, [1][0][RTW89_UK][0][25] = 6, + [1][0][RTW89_THAILAND][1][25] = 42, + [1][0][RTW89_THAILAND][0][25] = -4, [1][0][RTW89_FCC][1][27] = -4, [1][0][RTW89_FCC][2][27] = 66, [1][0][RTW89_ETSI][1][27] = 46, @@ -52526,6 +55093,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][27] = 42, [1][0][RTW89_MKK][0][27] = 2, [1][0][RTW89_IC][1][27] = -4, + [1][0][RTW89_IC][2][27] = 66, [1][0][RTW89_KCC][1][27] = -2, [1][0][RTW89_KCC][0][27] = -2, [1][0][RTW89_ACMA][1][27] = 46, @@ -52535,6 +55103,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][27] = 6, [1][0][RTW89_UK][1][27] = 46, [1][0][RTW89_UK][0][27] = 6, + [1][0][RTW89_THAILAND][1][27] = 42, + [1][0][RTW89_THAILAND][0][27] = -4, [1][0][RTW89_FCC][1][29] = -4, [1][0][RTW89_FCC][2][29] = 66, [1][0][RTW89_ETSI][1][29] = 46, @@ -52542,6 +55112,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][29] = 42, [1][0][RTW89_MKK][0][29] = 2, [1][0][RTW89_IC][1][29] = -4, + [1][0][RTW89_IC][2][29] = 66, [1][0][RTW89_KCC][1][29] = -2, [1][0][RTW89_KCC][0][29] = -2, [1][0][RTW89_ACMA][1][29] = 46, @@ -52551,6 +55122,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][29] = 6, [1][0][RTW89_UK][1][29] = 46, [1][0][RTW89_UK][0][29] = 6, + 
[1][0][RTW89_THAILAND][1][29] = 42, + [1][0][RTW89_THAILAND][0][29] = -4, [1][0][RTW89_FCC][1][30] = -4, [1][0][RTW89_FCC][2][30] = 66, [1][0][RTW89_ETSI][1][30] = 46, @@ -52558,6 +55131,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][30] = 42, [1][0][RTW89_MKK][0][30] = 2, [1][0][RTW89_IC][1][30] = -4, + [1][0][RTW89_IC][2][30] = 66, [1][0][RTW89_KCC][1][30] = -2, [1][0][RTW89_KCC][0][30] = -2, [1][0][RTW89_ACMA][1][30] = 46, @@ -52567,6 +55141,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][30] = 6, [1][0][RTW89_UK][1][30] = 46, [1][0][RTW89_UK][0][30] = 6, + [1][0][RTW89_THAILAND][1][30] = 42, + [1][0][RTW89_THAILAND][0][30] = -4, [1][0][RTW89_FCC][1][32] = -4, [1][0][RTW89_FCC][2][32] = 66, [1][0][RTW89_ETSI][1][32] = 46, @@ -52574,6 +55150,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][32] = 42, [1][0][RTW89_MKK][0][32] = 2, [1][0][RTW89_IC][1][32] = -4, + [1][0][RTW89_IC][2][32] = 66, [1][0][RTW89_KCC][1][32] = -2, [1][0][RTW89_KCC][0][32] = -2, [1][0][RTW89_ACMA][1][32] = 46, @@ -52583,6 +55160,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][32] = 6, [1][0][RTW89_UK][1][32] = 46, [1][0][RTW89_UK][0][32] = 6, + [1][0][RTW89_THAILAND][1][32] = 42, + [1][0][RTW89_THAILAND][0][32] = -4, [1][0][RTW89_FCC][1][34] = -4, [1][0][RTW89_FCC][2][34] = 66, [1][0][RTW89_ETSI][1][34] = 46, @@ -52590,6 +55169,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][34] = 42, [1][0][RTW89_MKK][0][34] = 2, [1][0][RTW89_IC][1][34] = -4, + [1][0][RTW89_IC][2][34] = 66, [1][0][RTW89_KCC][1][34] = -2, [1][0][RTW89_KCC][0][34] = -2, [1][0][RTW89_ACMA][1][34] = 46, @@ -52599,6 +55179,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][34] = 6, [1][0][RTW89_UK][1][34] = 46, [1][0][RTW89_UK][0][34] = 6, + [1][0][RTW89_THAILAND][1][34] = 42, + [1][0][RTW89_THAILAND][0][34] = -4, [1][0][RTW89_FCC][1][36] = -4, [1][0][RTW89_FCC][2][36] = 66, [1][0][RTW89_ETSI][1][36] = 46, @@ -52606,6 +55188,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][36] = 42, [1][0][RTW89_MKK][0][36] = 2, [1][0][RTW89_IC][1][36] = -4, + [1][0][RTW89_IC][2][36] = 66, [1][0][RTW89_KCC][1][36] = -2, [1][0][RTW89_KCC][0][36] = -2, [1][0][RTW89_ACMA][1][36] = 46, @@ -52615,6 +55198,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][36] = 6, [1][0][RTW89_UK][1][36] = 46, [1][0][RTW89_UK][0][36] = 6, + [1][0][RTW89_THAILAND][1][36] = 42, + [1][0][RTW89_THAILAND][0][36] = -4, [1][0][RTW89_FCC][1][38] = -4, [1][0][RTW89_FCC][2][38] = 66, [1][0][RTW89_ETSI][1][38] = 46, @@ -52622,6 +55207,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][38] = 42, [1][0][RTW89_MKK][0][38] = 2, [1][0][RTW89_IC][1][38] = -4, + [1][0][RTW89_IC][2][38] = 66, [1][0][RTW89_KCC][1][38] = -2, [1][0][RTW89_KCC][0][38] = -2, [1][0][RTW89_ACMA][1][38] = 46, @@ -52631,6 +55217,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][38] = 6, [1][0][RTW89_UK][1][38] = 46, [1][0][RTW89_UK][0][38] = 6, + [1][0][RTW89_THAILAND][1][38] = 42, + [1][0][RTW89_THAILAND][0][38] = -4, [1][0][RTW89_FCC][1][40] = -4, [1][0][RTW89_FCC][2][40] = 66, [1][0][RTW89_ETSI][1][40] = 46, @@ -52638,6 +55226,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][0][RTW89_MKK][1][40] = 42, [1][0][RTW89_MKK][0][40] = 2, [1][0][RTW89_IC][1][40] = -4, + [1][0][RTW89_IC][2][40] = 66, [1][0][RTW89_KCC][1][40] = -2, [1][0][RTW89_KCC][0][40] = -2, [1][0][RTW89_ACMA][1][40] = 46, @@ -52647,6 +55236,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][40] = 6, [1][0][RTW89_UK][1][40] = 46, [1][0][RTW89_UK][0][40] = 6, + [1][0][RTW89_THAILAND][1][40] = 42, + [1][0][RTW89_THAILAND][0][40] = -4, [1][0][RTW89_FCC][1][42] = -4, [1][0][RTW89_FCC][2][42] = 66, [1][0][RTW89_ETSI][1][42] = 46, @@ -52654,6 +55245,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][42] = 42, [1][0][RTW89_MKK][0][42] = 2, [1][0][RTW89_IC][1][42] = -4, + [1][0][RTW89_IC][2][42] = 66, [1][0][RTW89_KCC][1][42] = -2, [1][0][RTW89_KCC][0][42] = -2, [1][0][RTW89_ACMA][1][42] = 46, @@ -52663,6 +55255,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][42] = 6, [1][0][RTW89_UK][1][42] = 46, [1][0][RTW89_UK][0][42] = 6, + [1][0][RTW89_THAILAND][1][42] = 42, + [1][0][RTW89_THAILAND][0][42] = -4, [1][0][RTW89_FCC][1][44] = -4, [1][0][RTW89_FCC][2][44] = 66, [1][0][RTW89_ETSI][1][44] = 46, @@ -52670,6 +55264,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][44] = 22, [1][0][RTW89_MKK][0][44] = 4, [1][0][RTW89_IC][1][44] = -4, + [1][0][RTW89_IC][2][44] = 66, [1][0][RTW89_KCC][1][44] = -2, [1][0][RTW89_KCC][0][44] = -2, [1][0][RTW89_ACMA][1][44] = 46, @@ -52679,6 +55274,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][44] = 8, [1][0][RTW89_UK][1][44] = 46, [1][0][RTW89_UK][0][44] = 8, + [1][0][RTW89_THAILAND][1][44] = 42, + [1][0][RTW89_THAILAND][0][44] = -4, [1][0][RTW89_FCC][1][45] = -4, [1][0][RTW89_FCC][2][45] = 127, [1][0][RTW89_ETSI][1][45] = 127, @@ -52686,6 +55283,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][45] = 127, [1][0][RTW89_MKK][0][45] = 127, [1][0][RTW89_IC][1][45] = -4, + [1][0][RTW89_IC][2][45] = 68, [1][0][RTW89_KCC][1][45] = -2, [1][0][RTW89_KCC][0][45] = 127, [1][0][RTW89_ACMA][1][45] = 127, @@ -52695,6 +55293,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][45] = 127, [1][0][RTW89_UK][1][45] = 127, [1][0][RTW89_UK][0][45] = 127, + [1][0][RTW89_THAILAND][1][45] = 127, + [1][0][RTW89_THAILAND][0][45] = 127, [1][0][RTW89_FCC][1][47] = -4, [1][0][RTW89_FCC][2][47] = 127, [1][0][RTW89_ETSI][1][47] = 127, @@ -52702,6 +55302,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][47] = 127, [1][0][RTW89_MKK][0][47] = 127, [1][0][RTW89_IC][1][47] = -4, + [1][0][RTW89_IC][2][47] = 68, [1][0][RTW89_KCC][1][47] = -2, [1][0][RTW89_KCC][0][47] = 127, [1][0][RTW89_ACMA][1][47] = 127, @@ -52711,6 +55312,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][47] = 127, [1][0][RTW89_UK][1][47] = 127, [1][0][RTW89_UK][0][47] = 127, + [1][0][RTW89_THAILAND][1][47] = 127, + [1][0][RTW89_THAILAND][0][47] = 127, [1][0][RTW89_FCC][1][49] = -4, [1][0][RTW89_FCC][2][49] = 127, [1][0][RTW89_ETSI][1][49] = 127, @@ -52718,6 +55321,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][49] = 127, [1][0][RTW89_MKK][0][49] = 127, [1][0][RTW89_IC][1][49] = -4, + [1][0][RTW89_IC][2][49] = 68, [1][0][RTW89_KCC][1][49] = -2, [1][0][RTW89_KCC][0][49] = 127, [1][0][RTW89_ACMA][1][49] = 127, @@ -52727,6 
+55331,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][49] = 127, [1][0][RTW89_UK][1][49] = 127, [1][0][RTW89_UK][0][49] = 127, + [1][0][RTW89_THAILAND][1][49] = 127, + [1][0][RTW89_THAILAND][0][49] = 127, [1][0][RTW89_FCC][1][51] = -4, [1][0][RTW89_FCC][2][51] = 127, [1][0][RTW89_ETSI][1][51] = 127, @@ -52734,6 +55340,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][51] = 127, [1][0][RTW89_MKK][0][51] = 127, [1][0][RTW89_IC][1][51] = -4, + [1][0][RTW89_IC][2][51] = 68, [1][0][RTW89_KCC][1][51] = -2, [1][0][RTW89_KCC][0][51] = 127, [1][0][RTW89_ACMA][1][51] = 127, @@ -52743,6 +55350,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][51] = 127, [1][0][RTW89_UK][1][51] = 127, [1][0][RTW89_UK][0][51] = 127, + [1][0][RTW89_THAILAND][1][51] = 127, + [1][0][RTW89_THAILAND][0][51] = 127, [1][0][RTW89_FCC][1][53] = -4, [1][0][RTW89_FCC][2][53] = 127, [1][0][RTW89_ETSI][1][53] = 127, @@ -52750,6 +55359,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][53] = 127, [1][0][RTW89_MKK][0][53] = 127, [1][0][RTW89_IC][1][53] = -4, + [1][0][RTW89_IC][2][53] = 68, [1][0][RTW89_KCC][1][53] = -2, [1][0][RTW89_KCC][0][53] = 127, [1][0][RTW89_ACMA][1][53] = 127, @@ -52759,6 +55369,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][53] = 127, [1][0][RTW89_UK][1][53] = 127, [1][0][RTW89_UK][0][53] = 127, + [1][0][RTW89_THAILAND][1][53] = 127, + [1][0][RTW89_THAILAND][0][53] = 127, [1][0][RTW89_FCC][1][55] = -4, [1][0][RTW89_FCC][2][55] = 68, [1][0][RTW89_ETSI][1][55] = 127, @@ -52766,6 +55378,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][55] = 127, [1][0][RTW89_MKK][0][55] = 127, [1][0][RTW89_IC][1][55] = -4, + [1][0][RTW89_IC][2][55] = 68, [1][0][RTW89_KCC][1][55] = -2, [1][0][RTW89_KCC][0][55] = 127, [1][0][RTW89_ACMA][1][55] = 127, @@ -52775,6 +55388,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][55] = 127, [1][0][RTW89_UK][1][55] = 127, [1][0][RTW89_UK][0][55] = 127, + [1][0][RTW89_THAILAND][1][55] = 127, + [1][0][RTW89_THAILAND][0][55] = 127, [1][0][RTW89_FCC][1][57] = -4, [1][0][RTW89_FCC][2][57] = 68, [1][0][RTW89_ETSI][1][57] = 127, @@ -52782,6 +55397,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][57] = 127, [1][0][RTW89_MKK][0][57] = 127, [1][0][RTW89_IC][1][57] = -4, + [1][0][RTW89_IC][2][57] = 68, [1][0][RTW89_KCC][1][57] = -2, [1][0][RTW89_KCC][0][57] = 127, [1][0][RTW89_ACMA][1][57] = 127, @@ -52791,6 +55407,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][57] = 127, [1][0][RTW89_UK][1][57] = 127, [1][0][RTW89_UK][0][57] = 127, + [1][0][RTW89_THAILAND][1][57] = 127, + [1][0][RTW89_THAILAND][0][57] = 127, [1][0][RTW89_FCC][1][59] = -4, [1][0][RTW89_FCC][2][59] = 68, [1][0][RTW89_ETSI][1][59] = 127, @@ -52798,6 +55416,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][59] = 127, [1][0][RTW89_MKK][0][59] = 127, [1][0][RTW89_IC][1][59] = -4, + [1][0][RTW89_IC][2][59] = 68, [1][0][RTW89_KCC][1][59] = -2, [1][0][RTW89_KCC][0][59] = 127, [1][0][RTW89_ACMA][1][59] = 127, @@ -52807,6 +55426,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][59] = 127, [1][0][RTW89_UK][1][59] = 127, [1][0][RTW89_UK][0][59] = 127, + 
[1][0][RTW89_THAILAND][1][59] = 127, + [1][0][RTW89_THAILAND][0][59] = 127, [1][0][RTW89_FCC][1][60] = -4, [1][0][RTW89_FCC][2][60] = 68, [1][0][RTW89_ETSI][1][60] = 127, @@ -52814,6 +55435,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][60] = 127, [1][0][RTW89_MKK][0][60] = 127, [1][0][RTW89_IC][1][60] = -4, + [1][0][RTW89_IC][2][60] = 68, [1][0][RTW89_KCC][1][60] = -2, [1][0][RTW89_KCC][0][60] = 127, [1][0][RTW89_ACMA][1][60] = 127, @@ -52823,6 +55445,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][60] = 127, [1][0][RTW89_UK][1][60] = 127, [1][0][RTW89_UK][0][60] = 127, + [1][0][RTW89_THAILAND][1][60] = 127, + [1][0][RTW89_THAILAND][0][60] = 127, [1][0][RTW89_FCC][1][62] = -4, [1][0][RTW89_FCC][2][62] = 68, [1][0][RTW89_ETSI][1][62] = 127, @@ -52830,6 +55454,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][62] = 127, [1][0][RTW89_MKK][0][62] = 127, [1][0][RTW89_IC][1][62] = -4, + [1][0][RTW89_IC][2][62] = 68, [1][0][RTW89_KCC][1][62] = -2, [1][0][RTW89_KCC][0][62] = 127, [1][0][RTW89_ACMA][1][62] = 127, @@ -52839,6 +55464,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][62] = 127, [1][0][RTW89_UK][1][62] = 127, [1][0][RTW89_UK][0][62] = 127, + [1][0][RTW89_THAILAND][1][62] = 127, + [1][0][RTW89_THAILAND][0][62] = 127, [1][0][RTW89_FCC][1][64] = -4, [1][0][RTW89_FCC][2][64] = 68, [1][0][RTW89_ETSI][1][64] = 127, @@ -52846,6 +55473,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][64] = 127, [1][0][RTW89_MKK][0][64] = 127, [1][0][RTW89_IC][1][64] = -4, + [1][0][RTW89_IC][2][64] = 68, [1][0][RTW89_KCC][1][64] = -2, [1][0][RTW89_KCC][0][64] = 127, [1][0][RTW89_ACMA][1][64] = 127, @@ -52855,6 +55483,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][64] = 127, [1][0][RTW89_UK][1][64] = 127, [1][0][RTW89_UK][0][64] = 127, + [1][0][RTW89_THAILAND][1][64] = 127, + [1][0][RTW89_THAILAND][0][64] = 127, [1][0][RTW89_FCC][1][66] = -4, [1][0][RTW89_FCC][2][66] = 68, [1][0][RTW89_ETSI][1][66] = 127, @@ -52862,6 +55492,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][66] = 127, [1][0][RTW89_MKK][0][66] = 127, [1][0][RTW89_IC][1][66] = -4, + [1][0][RTW89_IC][2][66] = 68, [1][0][RTW89_KCC][1][66] = -2, [1][0][RTW89_KCC][0][66] = 127, [1][0][RTW89_ACMA][1][66] = 127, @@ -52871,6 +55502,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][66] = 127, [1][0][RTW89_UK][1][66] = 127, [1][0][RTW89_UK][0][66] = 127, + [1][0][RTW89_THAILAND][1][66] = 127, + [1][0][RTW89_THAILAND][0][66] = 127, [1][0][RTW89_FCC][1][68] = -4, [1][0][RTW89_FCC][2][68] = 68, [1][0][RTW89_ETSI][1][68] = 127, @@ -52878,6 +55511,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][68] = 127, [1][0][RTW89_MKK][0][68] = 127, [1][0][RTW89_IC][1][68] = -4, + [1][0][RTW89_IC][2][68] = 68, [1][0][RTW89_KCC][1][68] = -2, [1][0][RTW89_KCC][0][68] = 127, [1][0][RTW89_ACMA][1][68] = 127, @@ -52887,6 +55521,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][68] = 127, [1][0][RTW89_UK][1][68] = 127, [1][0][RTW89_UK][0][68] = 127, + [1][0][RTW89_THAILAND][1][68] = 127, + [1][0][RTW89_THAILAND][0][68] = 127, [1][0][RTW89_FCC][1][70] = -4, [1][0][RTW89_FCC][2][70] = 68, [1][0][RTW89_ETSI][1][70] = 127, @@ -52894,6 +55530,7 @@ 
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][70] = 127, [1][0][RTW89_MKK][0][70] = 127, [1][0][RTW89_IC][1][70] = -4, + [1][0][RTW89_IC][2][70] = 68, [1][0][RTW89_KCC][1][70] = -2, [1][0][RTW89_KCC][0][70] = 127, [1][0][RTW89_ACMA][1][70] = 127, @@ -52903,6 +55540,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][70] = 127, [1][0][RTW89_UK][1][70] = 127, [1][0][RTW89_UK][0][70] = 127, + [1][0][RTW89_THAILAND][1][70] = 127, + [1][0][RTW89_THAILAND][0][70] = 127, [1][0][RTW89_FCC][1][72] = -4, [1][0][RTW89_FCC][2][72] = 68, [1][0][RTW89_ETSI][1][72] = 127, @@ -52910,6 +55549,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][72] = 127, [1][0][RTW89_MKK][0][72] = 127, [1][0][RTW89_IC][1][72] = -4, + [1][0][RTW89_IC][2][72] = 68, [1][0][RTW89_KCC][1][72] = -2, [1][0][RTW89_KCC][0][72] = 127, [1][0][RTW89_ACMA][1][72] = 127, @@ -52919,6 +55559,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][72] = 127, [1][0][RTW89_UK][1][72] = 127, [1][0][RTW89_UK][0][72] = 127, + [1][0][RTW89_THAILAND][1][72] = 127, + [1][0][RTW89_THAILAND][0][72] = 127, [1][0][RTW89_FCC][1][74] = -4, [1][0][RTW89_FCC][2][74] = 68, [1][0][RTW89_ETSI][1][74] = 127, @@ -52926,6 +55568,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][74] = 127, [1][0][RTW89_MKK][0][74] = 127, [1][0][RTW89_IC][1][74] = -4, + [1][0][RTW89_IC][2][74] = 68, [1][0][RTW89_KCC][1][74] = -2, [1][0][RTW89_KCC][0][74] = 127, [1][0][RTW89_ACMA][1][74] = 127, @@ -52935,6 +55578,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][74] = 127, [1][0][RTW89_UK][1][74] = 127, [1][0][RTW89_UK][0][74] = 127, + [1][0][RTW89_THAILAND][1][74] = 127, + [1][0][RTW89_THAILAND][0][74] = 127, [1][0][RTW89_FCC][1][75] = -4, [1][0][RTW89_FCC][2][75] = 68, [1][0][RTW89_ETSI][1][75] = 127, @@ -52942,6 +55587,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][75] = 127, [1][0][RTW89_MKK][0][75] = 127, [1][0][RTW89_IC][1][75] = -4, + [1][0][RTW89_IC][2][75] = 68, [1][0][RTW89_KCC][1][75] = -2, [1][0][RTW89_KCC][0][75] = 127, [1][0][RTW89_ACMA][1][75] = 127, @@ -52951,6 +55597,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][75] = 127, [1][0][RTW89_UK][1][75] = 127, [1][0][RTW89_UK][0][75] = 127, + [1][0][RTW89_THAILAND][1][75] = 127, + [1][0][RTW89_THAILAND][0][75] = 127, [1][0][RTW89_FCC][1][77] = -4, [1][0][RTW89_FCC][2][77] = 68, [1][0][RTW89_ETSI][1][77] = 127, @@ -52958,6 +55606,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][77] = 127, [1][0][RTW89_MKK][0][77] = 127, [1][0][RTW89_IC][1][77] = -4, + [1][0][RTW89_IC][2][77] = 68, [1][0][RTW89_KCC][1][77] = -2, [1][0][RTW89_KCC][0][77] = 127, [1][0][RTW89_ACMA][1][77] = 127, @@ -52967,6 +55616,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][77] = 127, [1][0][RTW89_UK][1][77] = 127, [1][0][RTW89_UK][0][77] = 127, + [1][0][RTW89_THAILAND][1][77] = 127, + [1][0][RTW89_THAILAND][0][77] = 127, [1][0][RTW89_FCC][1][79] = -4, [1][0][RTW89_FCC][2][79] = 68, [1][0][RTW89_ETSI][1][79] = 127, @@ -52974,6 +55625,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][79] = 127, [1][0][RTW89_MKK][0][79] = 127, [1][0][RTW89_IC][1][79] = -4, + [1][0][RTW89_IC][2][79] = 68, 
[1][0][RTW89_KCC][1][79] = -2, [1][0][RTW89_KCC][0][79] = 127, [1][0][RTW89_ACMA][1][79] = 127, @@ -52983,6 +55635,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][79] = 127, [1][0][RTW89_UK][1][79] = 127, [1][0][RTW89_UK][0][79] = 127, + [1][0][RTW89_THAILAND][1][79] = 127, + [1][0][RTW89_THAILAND][0][79] = 127, [1][0][RTW89_FCC][1][81] = -4, [1][0][RTW89_FCC][2][81] = 68, [1][0][RTW89_ETSI][1][81] = 127, @@ -52990,6 +55644,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][81] = 127, [1][0][RTW89_MKK][0][81] = 127, [1][0][RTW89_IC][1][81] = -4, + [1][0][RTW89_IC][2][81] = 68, [1][0][RTW89_KCC][1][81] = -2, [1][0][RTW89_KCC][0][81] = 127, [1][0][RTW89_ACMA][1][81] = 127, @@ -52999,6 +55654,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][81] = 127, [1][0][RTW89_UK][1][81] = 127, [1][0][RTW89_UK][0][81] = 127, + [1][0][RTW89_THAILAND][1][81] = 127, + [1][0][RTW89_THAILAND][0][81] = 127, [1][0][RTW89_FCC][1][83] = -4, [1][0][RTW89_FCC][2][83] = 68, [1][0][RTW89_ETSI][1][83] = 127, @@ -53006,6 +55663,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][83] = 127, [1][0][RTW89_MKK][0][83] = 127, [1][0][RTW89_IC][1][83] = -4, + [1][0][RTW89_IC][2][83] = 68, [1][0][RTW89_KCC][1][83] = -2, [1][0][RTW89_KCC][0][83] = 127, [1][0][RTW89_ACMA][1][83] = 127, @@ -53015,6 +55673,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][83] = 127, [1][0][RTW89_UK][1][83] = 127, [1][0][RTW89_UK][0][83] = 127, + [1][0][RTW89_THAILAND][1][83] = 127, + [1][0][RTW89_THAILAND][0][83] = 127, [1][0][RTW89_FCC][1][85] = -4, [1][0][RTW89_FCC][2][85] = 68, [1][0][RTW89_ETSI][1][85] = 127, @@ -53022,6 +55682,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][85] = 127, [1][0][RTW89_MKK][0][85] = 127, [1][0][RTW89_IC][1][85] = -4, + [1][0][RTW89_IC][2][85] = 68, [1][0][RTW89_KCC][1][85] = -2, [1][0][RTW89_KCC][0][85] = 127, [1][0][RTW89_ACMA][1][85] = 127, @@ -53031,6 +55692,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][85] = 127, [1][0][RTW89_UK][1][85] = 127, [1][0][RTW89_UK][0][85] = 127, + [1][0][RTW89_THAILAND][1][85] = 127, + [1][0][RTW89_THAILAND][0][85] = 127, [1][0][RTW89_FCC][1][87] = -4, [1][0][RTW89_FCC][2][87] = 127, [1][0][RTW89_ETSI][1][87] = 127, @@ -53038,6 +55701,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][87] = 127, [1][0][RTW89_MKK][0][87] = 127, [1][0][RTW89_IC][1][87] = -4, + [1][0][RTW89_IC][2][87] = 127, [1][0][RTW89_KCC][1][87] = -2, [1][0][RTW89_KCC][0][87] = 127, [1][0][RTW89_ACMA][1][87] = 127, @@ -53047,6 +55711,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][87] = 127, [1][0][RTW89_UK][1][87] = 127, [1][0][RTW89_UK][0][87] = 127, + [1][0][RTW89_THAILAND][1][87] = 127, + [1][0][RTW89_THAILAND][0][87] = 127, [1][0][RTW89_FCC][1][89] = -4, [1][0][RTW89_FCC][2][89] = 127, [1][0][RTW89_ETSI][1][89] = 127, @@ -53054,6 +55720,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][89] = 127, [1][0][RTW89_MKK][0][89] = 127, [1][0][RTW89_IC][1][89] = -4, + [1][0][RTW89_IC][2][89] = 127, [1][0][RTW89_KCC][1][89] = -2, [1][0][RTW89_KCC][0][89] = 127, [1][0][RTW89_ACMA][1][89] = 127, @@ -53063,6 +55730,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][0][RTW89_QATAR][0][89] = 127, [1][0][RTW89_UK][1][89] = 127, [1][0][RTW89_UK][0][89] = 127, + [1][0][RTW89_THAILAND][1][89] = 127, + [1][0][RTW89_THAILAND][0][89] = 127, [1][0][RTW89_FCC][1][90] = -4, [1][0][RTW89_FCC][2][90] = 127, [1][0][RTW89_ETSI][1][90] = 127, @@ -53070,6 +55739,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][90] = 127, [1][0][RTW89_MKK][0][90] = 127, [1][0][RTW89_IC][1][90] = -4, + [1][0][RTW89_IC][2][90] = 127, [1][0][RTW89_KCC][1][90] = -2, [1][0][RTW89_KCC][0][90] = 127, [1][0][RTW89_ACMA][1][90] = 127, @@ -53079,6 +55749,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][90] = 127, [1][0][RTW89_UK][1][90] = 127, [1][0][RTW89_UK][0][90] = 127, + [1][0][RTW89_THAILAND][1][90] = 127, + [1][0][RTW89_THAILAND][0][90] = 127, [1][0][RTW89_FCC][1][92] = -4, [1][0][RTW89_FCC][2][92] = 127, [1][0][RTW89_ETSI][1][92] = 127, @@ -53086,6 +55758,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][92] = 127, [1][0][RTW89_MKK][0][92] = 127, [1][0][RTW89_IC][1][92] = -4, + [1][0][RTW89_IC][2][92] = 127, [1][0][RTW89_KCC][1][92] = -2, [1][0][RTW89_KCC][0][92] = 127, [1][0][RTW89_ACMA][1][92] = 127, @@ -53095,6 +55768,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][92] = 127, [1][0][RTW89_UK][1][92] = 127, [1][0][RTW89_UK][0][92] = 127, + [1][0][RTW89_THAILAND][1][92] = 127, + [1][0][RTW89_THAILAND][0][92] = 127, [1][0][RTW89_FCC][1][94] = -4, [1][0][RTW89_FCC][2][94] = 127, [1][0][RTW89_ETSI][1][94] = 127, @@ -53102,6 +55777,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][94] = 127, [1][0][RTW89_MKK][0][94] = 127, [1][0][RTW89_IC][1][94] = -4, + [1][0][RTW89_IC][2][94] = 127, [1][0][RTW89_KCC][1][94] = -2, [1][0][RTW89_KCC][0][94] = 127, [1][0][RTW89_ACMA][1][94] = 127, @@ -53111,6 +55787,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][94] = 127, [1][0][RTW89_UK][1][94] = 127, [1][0][RTW89_UK][0][94] = 127, + [1][0][RTW89_THAILAND][1][94] = 127, + [1][0][RTW89_THAILAND][0][94] = 127, [1][0][RTW89_FCC][1][96] = -4, [1][0][RTW89_FCC][2][96] = 127, [1][0][RTW89_ETSI][1][96] = 127, @@ -53118,6 +55796,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][96] = 127, [1][0][RTW89_MKK][0][96] = 127, [1][0][RTW89_IC][1][96] = -4, + [1][0][RTW89_IC][2][96] = 127, [1][0][RTW89_KCC][1][96] = -2, [1][0][RTW89_KCC][0][96] = 127, [1][0][RTW89_ACMA][1][96] = 127, @@ -53127,6 +55806,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][96] = 127, [1][0][RTW89_UK][1][96] = 127, [1][0][RTW89_UK][0][96] = 127, + [1][0][RTW89_THAILAND][1][96] = 127, + [1][0][RTW89_THAILAND][0][96] = 127, [1][0][RTW89_FCC][1][98] = -4, [1][0][RTW89_FCC][2][98] = 127, [1][0][RTW89_ETSI][1][98] = 127, @@ -53134,6 +55815,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][98] = 127, [1][0][RTW89_MKK][0][98] = 127, [1][0][RTW89_IC][1][98] = -4, + [1][0][RTW89_IC][2][98] = 127, [1][0][RTW89_KCC][1][98] = -2, [1][0][RTW89_KCC][0][98] = 127, [1][0][RTW89_ACMA][1][98] = 127, @@ -53143,6 +55825,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][98] = 127, [1][0][RTW89_UK][1][98] = 127, [1][0][RTW89_UK][0][98] = 127, + [1][0][RTW89_THAILAND][1][98] = 127, + [1][0][RTW89_THAILAND][0][98] = 127, 
[1][0][RTW89_FCC][1][100] = -4, [1][0][RTW89_FCC][2][100] = 127, [1][0][RTW89_ETSI][1][100] = 127, @@ -53150,6 +55834,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][100] = 127, [1][0][RTW89_MKK][0][100] = 127, [1][0][RTW89_IC][1][100] = -4, + [1][0][RTW89_IC][2][100] = 127, [1][0][RTW89_KCC][1][100] = -2, [1][0][RTW89_KCC][0][100] = 127, [1][0][RTW89_ACMA][1][100] = 127, @@ -53159,6 +55844,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][100] = 127, [1][0][RTW89_UK][1][100] = 127, [1][0][RTW89_UK][0][100] = 127, + [1][0][RTW89_THAILAND][1][100] = 127, + [1][0][RTW89_THAILAND][0][100] = 127, [1][0][RTW89_FCC][1][102] = -4, [1][0][RTW89_FCC][2][102] = 127, [1][0][RTW89_ETSI][1][102] = 127, @@ -53166,6 +55853,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][102] = 127, [1][0][RTW89_MKK][0][102] = 127, [1][0][RTW89_IC][1][102] = -4, + [1][0][RTW89_IC][2][102] = 127, [1][0][RTW89_KCC][1][102] = -2, [1][0][RTW89_KCC][0][102] = 127, [1][0][RTW89_ACMA][1][102] = 127, @@ -53175,6 +55863,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][102] = 127, [1][0][RTW89_UK][1][102] = 127, [1][0][RTW89_UK][0][102] = 127, + [1][0][RTW89_THAILAND][1][102] = 127, + [1][0][RTW89_THAILAND][0][102] = 127, [1][0][RTW89_FCC][1][104] = -4, [1][0][RTW89_FCC][2][104] = 127, [1][0][RTW89_ETSI][1][104] = 127, @@ -53182,6 +55872,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][104] = 127, [1][0][RTW89_MKK][0][104] = 127, [1][0][RTW89_IC][1][104] = -4, + [1][0][RTW89_IC][2][104] = 127, [1][0][RTW89_KCC][1][104] = -2, [1][0][RTW89_KCC][0][104] = 127, [1][0][RTW89_ACMA][1][104] = 127, @@ -53191,6 +55882,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][104] = 127, [1][0][RTW89_UK][1][104] = 127, [1][0][RTW89_UK][0][104] = 127, + [1][0][RTW89_THAILAND][1][104] = 127, + [1][0][RTW89_THAILAND][0][104] = 127, [1][0][RTW89_FCC][1][105] = -4, [1][0][RTW89_FCC][2][105] = 127, [1][0][RTW89_ETSI][1][105] = 127, @@ -53198,6 +55891,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][105] = 127, [1][0][RTW89_MKK][0][105] = 127, [1][0][RTW89_IC][1][105] = -4, + [1][0][RTW89_IC][2][105] = 127, [1][0][RTW89_KCC][1][105] = -2, [1][0][RTW89_KCC][0][105] = 127, [1][0][RTW89_ACMA][1][105] = 127, @@ -53207,6 +55901,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][105] = 127, [1][0][RTW89_UK][1][105] = 127, [1][0][RTW89_UK][0][105] = 127, + [1][0][RTW89_THAILAND][1][105] = 127, + [1][0][RTW89_THAILAND][0][105] = 127, [1][0][RTW89_FCC][1][107] = 1, [1][0][RTW89_FCC][2][107] = 127, [1][0][RTW89_ETSI][1][107] = 127, @@ -53214,6 +55910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][107] = 127, [1][0][RTW89_MKK][0][107] = 127, [1][0][RTW89_IC][1][107] = 1, + [1][0][RTW89_IC][2][107] = 127, [1][0][RTW89_KCC][1][107] = -2, [1][0][RTW89_KCC][0][107] = 127, [1][0][RTW89_ACMA][1][107] = 127, @@ -53223,6 +55920,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][107] = 127, [1][0][RTW89_UK][1][107] = 127, [1][0][RTW89_UK][0][107] = 127, + [1][0][RTW89_THAILAND][1][107] = 127, + [1][0][RTW89_THAILAND][0][107] = 127, [1][0][RTW89_FCC][1][109] = 2, [1][0][RTW89_FCC][2][109] = 127, [1][0][RTW89_ETSI][1][109] = 127, @@ -53230,6 
+55929,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][109] = 127, [1][0][RTW89_MKK][0][109] = 127, [1][0][RTW89_IC][1][109] = 2, + [1][0][RTW89_IC][2][109] = 127, [1][0][RTW89_KCC][1][109] = 127, [1][0][RTW89_KCC][0][109] = 127, [1][0][RTW89_ACMA][1][109] = 127, @@ -53239,6 +55939,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][109] = 127, [1][0][RTW89_UK][1][109] = 127, [1][0][RTW89_UK][0][109] = 127, + [1][0][RTW89_THAILAND][1][109] = 127, + [1][0][RTW89_THAILAND][0][109] = 127, [1][0][RTW89_FCC][1][111] = 127, [1][0][RTW89_FCC][2][111] = 127, [1][0][RTW89_ETSI][1][111] = 127, @@ -53246,6 +55948,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][111] = 127, [1][0][RTW89_MKK][0][111] = 127, [1][0][RTW89_IC][1][111] = 127, + [1][0][RTW89_IC][2][111] = 127, [1][0][RTW89_KCC][1][111] = 127, [1][0][RTW89_KCC][0][111] = 127, [1][0][RTW89_ACMA][1][111] = 127, @@ -53255,6 +55958,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][111] = 127, [1][0][RTW89_UK][1][111] = 127, [1][0][RTW89_UK][0][111] = 127, + [1][0][RTW89_THAILAND][1][111] = 127, + [1][0][RTW89_THAILAND][0][111] = 127, [1][0][RTW89_FCC][1][113] = 127, [1][0][RTW89_FCC][2][113] = 127, [1][0][RTW89_ETSI][1][113] = 127, @@ -53262,6 +55967,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][113] = 127, [1][0][RTW89_MKK][0][113] = 127, [1][0][RTW89_IC][1][113] = 127, + [1][0][RTW89_IC][2][113] = 127, [1][0][RTW89_KCC][1][113] = 127, [1][0][RTW89_KCC][0][113] = 127, [1][0][RTW89_ACMA][1][113] = 127, @@ -53271,6 +55977,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][113] = 127, [1][0][RTW89_UK][1][113] = 127, [1][0][RTW89_UK][0][113] = 127, + [1][0][RTW89_THAILAND][1][113] = 127, + [1][0][RTW89_THAILAND][0][113] = 127, [1][0][RTW89_FCC][1][115] = 127, [1][0][RTW89_FCC][2][115] = 127, [1][0][RTW89_ETSI][1][115] = 127, @@ -53278,6 +55986,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][115] = 127, [1][0][RTW89_MKK][0][115] = 127, [1][0][RTW89_IC][1][115] = 127, + [1][0][RTW89_IC][2][115] = 127, [1][0][RTW89_KCC][1][115] = 127, [1][0][RTW89_KCC][0][115] = 127, [1][0][RTW89_ACMA][1][115] = 127, @@ -53287,6 +55996,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][115] = 127, [1][0][RTW89_UK][1][115] = 127, [1][0][RTW89_UK][0][115] = 127, + [1][0][RTW89_THAILAND][1][115] = 127, + [1][0][RTW89_THAILAND][0][115] = 127, [1][0][RTW89_FCC][1][117] = 127, [1][0][RTW89_FCC][2][117] = 127, [1][0][RTW89_ETSI][1][117] = 127, @@ -53294,6 +56005,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][117] = 127, [1][0][RTW89_MKK][0][117] = 127, [1][0][RTW89_IC][1][117] = 127, + [1][0][RTW89_IC][2][117] = 127, [1][0][RTW89_KCC][1][117] = 127, [1][0][RTW89_KCC][0][117] = 127, [1][0][RTW89_ACMA][1][117] = 127, @@ -53303,6 +56015,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][117] = 127, [1][0][RTW89_UK][1][117] = 127, [1][0][RTW89_UK][0][117] = 127, + [1][0][RTW89_THAILAND][1][117] = 127, + [1][0][RTW89_THAILAND][0][117] = 127, [1][0][RTW89_FCC][1][119] = 127, [1][0][RTW89_FCC][2][119] = 127, [1][0][RTW89_ETSI][1][119] = 127, @@ -53310,6 +56024,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][0][RTW89_MKK][1][119] = 127, [1][0][RTW89_MKK][0][119] = 127, [1][0][RTW89_IC][1][119] = 127, + [1][0][RTW89_IC][2][119] = 127, [1][0][RTW89_KCC][1][119] = 127, [1][0][RTW89_KCC][0][119] = 127, [1][0][RTW89_ACMA][1][119] = 127, @@ -53319,6 +56034,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][119] = 127, [1][0][RTW89_UK][1][119] = 127, [1][0][RTW89_UK][0][119] = 127, + [1][0][RTW89_THAILAND][1][119] = 127, + [1][0][RTW89_THAILAND][0][119] = 127, [1][1][RTW89_FCC][1][0] = -26, [1][1][RTW89_FCC][2][0] = 44, [1][1][RTW89_ETSI][1][0] = 32, @@ -53326,6 +56043,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][0] = 30, [1][1][RTW89_MKK][0][0] = -10, [1][1][RTW89_IC][1][0] = -26, + [1][1][RTW89_IC][2][0] = 44, [1][1][RTW89_KCC][1][0] = -14, [1][1][RTW89_KCC][0][0] = -14, [1][1][RTW89_ACMA][1][0] = 32, @@ -53335,6 +56053,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][0] = -6, [1][1][RTW89_UK][1][0] = 32, [1][1][RTW89_UK][0][0] = -6, + [1][1][RTW89_THAILAND][1][0] = 18, + [1][1][RTW89_THAILAND][0][0] = -26, [1][1][RTW89_FCC][1][2] = -28, [1][1][RTW89_FCC][2][2] = 44, [1][1][RTW89_ETSI][1][2] = 32, @@ -53342,6 +56062,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][2] = 30, [1][1][RTW89_MKK][0][2] = -10, [1][1][RTW89_IC][1][2] = -28, + [1][1][RTW89_IC][2][2] = 44, [1][1][RTW89_KCC][1][2] = -14, [1][1][RTW89_KCC][0][2] = -14, [1][1][RTW89_ACMA][1][2] = 32, @@ -53351,6 +56072,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][2] = -6, [1][1][RTW89_UK][1][2] = 32, [1][1][RTW89_UK][0][2] = -6, + [1][1][RTW89_THAILAND][1][2] = 18, + [1][1][RTW89_THAILAND][0][2] = -28, [1][1][RTW89_FCC][1][4] = -28, [1][1][RTW89_FCC][2][4] = 44, [1][1][RTW89_ETSI][1][4] = 32, @@ -53358,6 +56081,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][4] = 30, [1][1][RTW89_MKK][0][4] = -10, [1][1][RTW89_IC][1][4] = -28, + [1][1][RTW89_IC][2][4] = 44, [1][1][RTW89_KCC][1][4] = -14, [1][1][RTW89_KCC][0][4] = -14, [1][1][RTW89_ACMA][1][4] = 32, @@ -53367,6 +56091,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][4] = -6, [1][1][RTW89_UK][1][4] = 32, [1][1][RTW89_UK][0][4] = -6, + [1][1][RTW89_THAILAND][1][4] = 18, + [1][1][RTW89_THAILAND][0][4] = -28, [1][1][RTW89_FCC][1][6] = -28, [1][1][RTW89_FCC][2][6] = 44, [1][1][RTW89_ETSI][1][6] = 32, @@ -53374,6 +56100,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][6] = 30, [1][1][RTW89_MKK][0][6] = -10, [1][1][RTW89_IC][1][6] = -28, + [1][1][RTW89_IC][2][6] = 44, [1][1][RTW89_KCC][1][6] = -14, [1][1][RTW89_KCC][0][6] = -14, [1][1][RTW89_ACMA][1][6] = 32, @@ -53383,6 +56110,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][6] = -6, [1][1][RTW89_UK][1][6] = 32, [1][1][RTW89_UK][0][6] = -6, + [1][1][RTW89_THAILAND][1][6] = 18, + [1][1][RTW89_THAILAND][0][6] = -28, [1][1][RTW89_FCC][1][8] = -28, [1][1][RTW89_FCC][2][8] = 44, [1][1][RTW89_ETSI][1][8] = 32, @@ -53390,6 +56119,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][8] = 30, [1][1][RTW89_MKK][0][8] = -10, [1][1][RTW89_IC][1][8] = -28, + [1][1][RTW89_IC][2][8] = 44, [1][1][RTW89_KCC][1][8] = -14, [1][1][RTW89_KCC][0][8] = -14, [1][1][RTW89_ACMA][1][8] = 32, @@ -53399,6 +56129,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][8] = -6, [1][1][RTW89_UK][1][8] = 32, [1][1][RTW89_UK][0][8] = -6, + [1][1][RTW89_THAILAND][1][8] = 18, + [1][1][RTW89_THAILAND][0][8] = -28, [1][1][RTW89_FCC][1][10] = -28, [1][1][RTW89_FCC][2][10] = 44, [1][1][RTW89_ETSI][1][10] = 32, @@ -53406,6 +56138,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][10] = 30, [1][1][RTW89_MKK][0][10] = -10, [1][1][RTW89_IC][1][10] = -28, + [1][1][RTW89_IC][2][10] = 44, [1][1][RTW89_KCC][1][10] = -14, [1][1][RTW89_KCC][0][10] = -14, [1][1][RTW89_ACMA][1][10] = 32, @@ -53415,6 +56148,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][10] = -6, [1][1][RTW89_UK][1][10] = 32, [1][1][RTW89_UK][0][10] = -6, + [1][1][RTW89_THAILAND][1][10] = 18, + [1][1][RTW89_THAILAND][0][10] = -28, [1][1][RTW89_FCC][1][12] = -28, [1][1][RTW89_FCC][2][12] = 44, [1][1][RTW89_ETSI][1][12] = 32, @@ -53422,6 +56157,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][12] = 30, [1][1][RTW89_MKK][0][12] = -10, [1][1][RTW89_IC][1][12] = -28, + [1][1][RTW89_IC][2][12] = 44, [1][1][RTW89_KCC][1][12] = -14, [1][1][RTW89_KCC][0][12] = -14, [1][1][RTW89_ACMA][1][12] = 32, @@ -53431,6 +56167,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][12] = -6, [1][1][RTW89_UK][1][12] = 32, [1][1][RTW89_UK][0][12] = -6, + [1][1][RTW89_THAILAND][1][12] = 18, + [1][1][RTW89_THAILAND][0][12] = -28, [1][1][RTW89_FCC][1][14] = -28, [1][1][RTW89_FCC][2][14] = 44, [1][1][RTW89_ETSI][1][14] = 32, @@ -53438,6 +56176,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][14] = 30, [1][1][RTW89_MKK][0][14] = -10, [1][1][RTW89_IC][1][14] = -28, + [1][1][RTW89_IC][2][14] = 44, [1][1][RTW89_KCC][1][14] = -14, [1][1][RTW89_KCC][0][14] = -14, [1][1][RTW89_ACMA][1][14] = 32, @@ -53447,6 +56186,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][14] = -6, [1][1][RTW89_UK][1][14] = 32, [1][1][RTW89_UK][0][14] = -6, + [1][1][RTW89_THAILAND][1][14] = 18, + [1][1][RTW89_THAILAND][0][14] = -28, [1][1][RTW89_FCC][1][15] = -28, [1][1][RTW89_FCC][2][15] = 44, [1][1][RTW89_ETSI][1][15] = 32, @@ -53454,6 +56195,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][15] = 30, [1][1][RTW89_MKK][0][15] = -10, [1][1][RTW89_IC][1][15] = -28, + [1][1][RTW89_IC][2][15] = 44, [1][1][RTW89_KCC][1][15] = -14, [1][1][RTW89_KCC][0][15] = -14, [1][1][RTW89_ACMA][1][15] = 32, @@ -53463,6 +56205,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][15] = -6, [1][1][RTW89_UK][1][15] = 32, [1][1][RTW89_UK][0][15] = -6, + [1][1][RTW89_THAILAND][1][15] = 18, + [1][1][RTW89_THAILAND][0][15] = -28, [1][1][RTW89_FCC][1][17] = -28, [1][1][RTW89_FCC][2][17] = 44, [1][1][RTW89_ETSI][1][17] = 32, @@ -53470,6 +56214,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][17] = 30, [1][1][RTW89_MKK][0][17] = -10, [1][1][RTW89_IC][1][17] = -28, + [1][1][RTW89_IC][2][17] = 44, [1][1][RTW89_KCC][1][17] = -14, [1][1][RTW89_KCC][0][17] = -14, [1][1][RTW89_ACMA][1][17] = 32, @@ -53479,6 +56224,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][17] = -6, [1][1][RTW89_UK][1][17] = 32, [1][1][RTW89_UK][0][17] = -6, + [1][1][RTW89_THAILAND][1][17] = 18, + [1][1][RTW89_THAILAND][0][17] = 
-28, [1][1][RTW89_FCC][1][19] = -28, [1][1][RTW89_FCC][2][19] = 44, [1][1][RTW89_ETSI][1][19] = 32, @@ -53486,6 +56233,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][19] = 30, [1][1][RTW89_MKK][0][19] = -10, [1][1][RTW89_IC][1][19] = -28, + [1][1][RTW89_IC][2][19] = 44, [1][1][RTW89_KCC][1][19] = -14, [1][1][RTW89_KCC][0][19] = -14, [1][1][RTW89_ACMA][1][19] = 32, @@ -53495,6 +56243,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][19] = -6, [1][1][RTW89_UK][1][19] = 32, [1][1][RTW89_UK][0][19] = -6, + [1][1][RTW89_THAILAND][1][19] = 18, + [1][1][RTW89_THAILAND][0][19] = -28, [1][1][RTW89_FCC][1][21] = -28, [1][1][RTW89_FCC][2][21] = 44, [1][1][RTW89_ETSI][1][21] = 32, @@ -53502,6 +56252,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][21] = 30, [1][1][RTW89_MKK][0][21] = -10, [1][1][RTW89_IC][1][21] = -28, + [1][1][RTW89_IC][2][21] = 44, [1][1][RTW89_KCC][1][21] = -14, [1][1][RTW89_KCC][0][21] = -14, [1][1][RTW89_ACMA][1][21] = 32, @@ -53511,6 +56262,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][21] = -6, [1][1][RTW89_UK][1][21] = 32, [1][1][RTW89_UK][0][21] = -6, + [1][1][RTW89_THAILAND][1][21] = 18, + [1][1][RTW89_THAILAND][0][21] = -28, [1][1][RTW89_FCC][1][23] = -28, [1][1][RTW89_FCC][2][23] = 44, [1][1][RTW89_ETSI][1][23] = 32, @@ -53518,6 +56271,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][23] = 32, [1][1][RTW89_MKK][0][23] = -10, [1][1][RTW89_IC][1][23] = -28, + [1][1][RTW89_IC][2][23] = 44, [1][1][RTW89_KCC][1][23] = -14, [1][1][RTW89_KCC][0][23] = -14, [1][1][RTW89_ACMA][1][23] = 32, @@ -53527,6 +56281,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][23] = -6, [1][1][RTW89_UK][1][23] = 32, [1][1][RTW89_UK][0][23] = -6, + [1][1][RTW89_THAILAND][1][23] = 18, + [1][1][RTW89_THAILAND][0][23] = -28, [1][1][RTW89_FCC][1][25] = -28, [1][1][RTW89_FCC][2][25] = 44, [1][1][RTW89_ETSI][1][25] = 32, @@ -53534,6 +56290,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][25] = 32, [1][1][RTW89_MKK][0][25] = -10, [1][1][RTW89_IC][1][25] = -28, + [1][1][RTW89_IC][2][25] = 44, [1][1][RTW89_KCC][1][25] = -14, [1][1][RTW89_KCC][0][25] = -14, [1][1][RTW89_ACMA][1][25] = 32, @@ -53543,6 +56300,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][25] = -6, [1][1][RTW89_UK][1][25] = 32, [1][1][RTW89_UK][0][25] = -6, + [1][1][RTW89_THAILAND][1][25] = 18, + [1][1][RTW89_THAILAND][0][25] = -28, [1][1][RTW89_FCC][1][27] = -28, [1][1][RTW89_FCC][2][27] = 44, [1][1][RTW89_ETSI][1][27] = 32, @@ -53550,6 +56309,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][27] = 32, [1][1][RTW89_MKK][0][27] = -10, [1][1][RTW89_IC][1][27] = -28, + [1][1][RTW89_IC][2][27] = 44, [1][1][RTW89_KCC][1][27] = -14, [1][1][RTW89_KCC][0][27] = -14, [1][1][RTW89_ACMA][1][27] = 32, @@ -53559,6 +56319,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][27] = -6, [1][1][RTW89_UK][1][27] = 32, [1][1][RTW89_UK][0][27] = -6, + [1][1][RTW89_THAILAND][1][27] = 18, + [1][1][RTW89_THAILAND][0][27] = -28, [1][1][RTW89_FCC][1][29] = -28, [1][1][RTW89_FCC][2][29] = 44, [1][1][RTW89_ETSI][1][29] = 32, @@ -53566,6 +56328,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][29] = 
32, [1][1][RTW89_MKK][0][29] = -10, [1][1][RTW89_IC][1][29] = -28, + [1][1][RTW89_IC][2][29] = 44, [1][1][RTW89_KCC][1][29] = -14, [1][1][RTW89_KCC][0][29] = -14, [1][1][RTW89_ACMA][1][29] = 32, @@ -53575,6 +56338,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][29] = -6, [1][1][RTW89_UK][1][29] = 32, [1][1][RTW89_UK][0][29] = -6, + [1][1][RTW89_THAILAND][1][29] = 18, + [1][1][RTW89_THAILAND][0][29] = -28, [1][1][RTW89_FCC][1][30] = -28, [1][1][RTW89_FCC][2][30] = 44, [1][1][RTW89_ETSI][1][30] = 32, @@ -53582,6 +56347,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][30] = 32, [1][1][RTW89_MKK][0][30] = -10, [1][1][RTW89_IC][1][30] = -28, + [1][1][RTW89_IC][2][30] = 44, [1][1][RTW89_KCC][1][30] = -14, [1][1][RTW89_KCC][0][30] = -14, [1][1][RTW89_ACMA][1][30] = 32, @@ -53591,6 +56357,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][30] = -6, [1][1][RTW89_UK][1][30] = 32, [1][1][RTW89_UK][0][30] = -6, + [1][1][RTW89_THAILAND][1][30] = 18, + [1][1][RTW89_THAILAND][0][30] = -28, [1][1][RTW89_FCC][1][32] = -28, [1][1][RTW89_FCC][2][32] = 44, [1][1][RTW89_ETSI][1][32] = 32, @@ -53598,6 +56366,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][32] = 32, [1][1][RTW89_MKK][0][32] = -10, [1][1][RTW89_IC][1][32] = -28, + [1][1][RTW89_IC][2][32] = 44, [1][1][RTW89_KCC][1][32] = -14, [1][1][RTW89_KCC][0][32] = -14, [1][1][RTW89_ACMA][1][32] = 32, @@ -53607,6 +56376,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][32] = -6, [1][1][RTW89_UK][1][32] = 32, [1][1][RTW89_UK][0][32] = -6, + [1][1][RTW89_THAILAND][1][32] = 18, + [1][1][RTW89_THAILAND][0][32] = -28, [1][1][RTW89_FCC][1][34] = -28, [1][1][RTW89_FCC][2][34] = 44, [1][1][RTW89_ETSI][1][34] = 32, @@ -53614,6 +56385,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][34] = 32, [1][1][RTW89_MKK][0][34] = -10, [1][1][RTW89_IC][1][34] = -28, + [1][1][RTW89_IC][2][34] = 44, [1][1][RTW89_KCC][1][34] = -14, [1][1][RTW89_KCC][0][34] = -14, [1][1][RTW89_ACMA][1][34] = 32, @@ -53623,6 +56395,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][34] = -6, [1][1][RTW89_UK][1][34] = 32, [1][1][RTW89_UK][0][34] = -6, + [1][1][RTW89_THAILAND][1][34] = 18, + [1][1][RTW89_THAILAND][0][34] = -28, [1][1][RTW89_FCC][1][36] = -28, [1][1][RTW89_FCC][2][36] = 44, [1][1][RTW89_ETSI][1][36] = 32, @@ -53630,6 +56404,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][36] = 32, [1][1][RTW89_MKK][0][36] = -10, [1][1][RTW89_IC][1][36] = -28, + [1][1][RTW89_IC][2][36] = 44, [1][1][RTW89_KCC][1][36] = -14, [1][1][RTW89_KCC][0][36] = -14, [1][1][RTW89_ACMA][1][36] = 32, @@ -53639,6 +56414,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][36] = -6, [1][1][RTW89_UK][1][36] = 32, [1][1][RTW89_UK][0][36] = -6, + [1][1][RTW89_THAILAND][1][36] = 18, + [1][1][RTW89_THAILAND][0][36] = -28, [1][1][RTW89_FCC][1][38] = -28, [1][1][RTW89_FCC][2][38] = 44, [1][1][RTW89_ETSI][1][38] = 32, @@ -53646,6 +56423,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][38] = 32, [1][1][RTW89_MKK][0][38] = -10, [1][1][RTW89_IC][1][38] = -28, + [1][1][RTW89_IC][2][38] = 44, [1][1][RTW89_KCC][1][38] = -14, [1][1][RTW89_KCC][0][38] = -14, [1][1][RTW89_ACMA][1][38] = 32, @@ -53655,6 +56433,8 @@ 
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][38] = -6, [1][1][RTW89_UK][1][38] = 32, [1][1][RTW89_UK][0][38] = -6, + [1][1][RTW89_THAILAND][1][38] = 18, + [1][1][RTW89_THAILAND][0][38] = -28, [1][1][RTW89_FCC][1][40] = -28, [1][1][RTW89_FCC][2][40] = 44, [1][1][RTW89_ETSI][1][40] = 32, @@ -53662,6 +56442,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][40] = 32, [1][1][RTW89_MKK][0][40] = -10, [1][1][RTW89_IC][1][40] = -28, + [1][1][RTW89_IC][2][40] = 44, [1][1][RTW89_KCC][1][40] = -14, [1][1][RTW89_KCC][0][40] = -14, [1][1][RTW89_ACMA][1][40] = 32, @@ -53671,6 +56452,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][40] = -6, [1][1][RTW89_UK][1][40] = 32, [1][1][RTW89_UK][0][40] = -6, + [1][1][RTW89_THAILAND][1][40] = 18, + [1][1][RTW89_THAILAND][0][40] = -28, [1][1][RTW89_FCC][1][42] = -28, [1][1][RTW89_FCC][2][42] = 44, [1][1][RTW89_ETSI][1][42] = 32, @@ -53678,6 +56461,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][42] = 32, [1][1][RTW89_MKK][0][42] = -10, [1][1][RTW89_IC][1][42] = -28, + [1][1][RTW89_IC][2][42] = 44, [1][1][RTW89_KCC][1][42] = -14, [1][1][RTW89_KCC][0][42] = -14, [1][1][RTW89_ACMA][1][42] = 32, @@ -53687,6 +56471,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][42] = -6, [1][1][RTW89_UK][1][42] = 32, [1][1][RTW89_UK][0][42] = -6, + [1][1][RTW89_THAILAND][1][42] = 18, + [1][1][RTW89_THAILAND][0][42] = -28, [1][1][RTW89_FCC][1][44] = -28, [1][1][RTW89_FCC][2][44] = 44, [1][1][RTW89_ETSI][1][44] = 34, @@ -53694,6 +56480,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][44] = 4, [1][1][RTW89_MKK][0][44] = -8, [1][1][RTW89_IC][1][44] = -28, + [1][1][RTW89_IC][2][44] = 44, [1][1][RTW89_KCC][1][44] = -14, [1][1][RTW89_KCC][0][44] = -14, [1][1][RTW89_ACMA][1][44] = 34, @@ -53703,6 +56490,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][44] = -4, [1][1][RTW89_UK][1][44] = 34, [1][1][RTW89_UK][0][44] = -4, + [1][1][RTW89_THAILAND][1][44] = 18, + [1][1][RTW89_THAILAND][0][44] = -28, [1][1][RTW89_FCC][1][45] = -26, [1][1][RTW89_FCC][2][45] = 127, [1][1][RTW89_ETSI][1][45] = 127, @@ -53710,6 +56499,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][45] = 127, [1][1][RTW89_MKK][0][45] = 127, [1][1][RTW89_IC][1][45] = -26, + [1][1][RTW89_IC][2][45] = 44, [1][1][RTW89_KCC][1][45] = -14, [1][1][RTW89_KCC][0][45] = 127, [1][1][RTW89_ACMA][1][45] = 127, @@ -53719,6 +56509,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][45] = 127, [1][1][RTW89_UK][1][45] = 127, [1][1][RTW89_UK][0][45] = 127, + [1][1][RTW89_THAILAND][1][45] = 127, + [1][1][RTW89_THAILAND][0][45] = 127, [1][1][RTW89_FCC][1][47] = -28, [1][1][RTW89_FCC][2][47] = 127, [1][1][RTW89_ETSI][1][47] = 127, @@ -53726,6 +56518,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][47] = 127, [1][1][RTW89_MKK][0][47] = 127, [1][1][RTW89_IC][1][47] = -28, + [1][1][RTW89_IC][2][47] = 44, [1][1][RTW89_KCC][1][47] = -14, [1][1][RTW89_KCC][0][47] = 127, [1][1][RTW89_ACMA][1][47] = 127, @@ -53735,6 +56528,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][47] = 127, [1][1][RTW89_UK][1][47] = 127, [1][1][RTW89_UK][0][47] = 127, + [1][1][RTW89_THAILAND][1][47] = 127, + 
[1][1][RTW89_THAILAND][0][47] = 127, [1][1][RTW89_FCC][1][49] = -28, [1][1][RTW89_FCC][2][49] = 127, [1][1][RTW89_ETSI][1][49] = 127, @@ -53742,6 +56537,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][49] = 127, [1][1][RTW89_MKK][0][49] = 127, [1][1][RTW89_IC][1][49] = -28, + [1][1][RTW89_IC][2][49] = 44, [1][1][RTW89_KCC][1][49] = -14, [1][1][RTW89_KCC][0][49] = 127, [1][1][RTW89_ACMA][1][49] = 127, @@ -53751,6 +56547,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][49] = 127, [1][1][RTW89_UK][1][49] = 127, [1][1][RTW89_UK][0][49] = 127, + [1][1][RTW89_THAILAND][1][49] = 127, + [1][1][RTW89_THAILAND][0][49] = 127, [1][1][RTW89_FCC][1][51] = -28, [1][1][RTW89_FCC][2][51] = 127, [1][1][RTW89_ETSI][1][51] = 127, @@ -53758,6 +56556,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][51] = 127, [1][1][RTW89_MKK][0][51] = 127, [1][1][RTW89_IC][1][51] = -28, + [1][1][RTW89_IC][2][51] = 44, [1][1][RTW89_KCC][1][51] = -14, [1][1][RTW89_KCC][0][51] = 127, [1][1][RTW89_ACMA][1][51] = 127, @@ -53767,6 +56566,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][51] = 127, [1][1][RTW89_UK][1][51] = 127, [1][1][RTW89_UK][0][51] = 127, + [1][1][RTW89_THAILAND][1][51] = 127, + [1][1][RTW89_THAILAND][0][51] = 127, [1][1][RTW89_FCC][1][53] = -26, [1][1][RTW89_FCC][2][53] = 127, [1][1][RTW89_ETSI][1][53] = 127, @@ -53774,6 +56575,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][53] = 127, [1][1][RTW89_MKK][0][53] = 127, [1][1][RTW89_IC][1][53] = -26, + [1][1][RTW89_IC][2][53] = 44, [1][1][RTW89_KCC][1][53] = -14, [1][1][RTW89_KCC][0][53] = 127, [1][1][RTW89_ACMA][1][53] = 127, @@ -53783,6 +56585,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][53] = 127, [1][1][RTW89_UK][1][53] = 127, [1][1][RTW89_UK][0][53] = 127, + [1][1][RTW89_THAILAND][1][53] = 127, + [1][1][RTW89_THAILAND][0][53] = 127, [1][1][RTW89_FCC][1][55] = -28, [1][1][RTW89_FCC][2][55] = 44, [1][1][RTW89_ETSI][1][55] = 127, @@ -53790,6 +56594,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][55] = 127, [1][1][RTW89_MKK][0][55] = 127, [1][1][RTW89_IC][1][55] = -28, + [1][1][RTW89_IC][2][55] = 44, [1][1][RTW89_KCC][1][55] = -14, [1][1][RTW89_KCC][0][55] = 127, [1][1][RTW89_ACMA][1][55] = 127, @@ -53799,6 +56604,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][55] = 127, [1][1][RTW89_UK][1][55] = 127, [1][1][RTW89_UK][0][55] = 127, + [1][1][RTW89_THAILAND][1][55] = 127, + [1][1][RTW89_THAILAND][0][55] = 127, [1][1][RTW89_FCC][1][57] = -28, [1][1][RTW89_FCC][2][57] = 44, [1][1][RTW89_ETSI][1][57] = 127, @@ -53806,6 +56613,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][57] = 127, [1][1][RTW89_MKK][0][57] = 127, [1][1][RTW89_IC][1][57] = -28, + [1][1][RTW89_IC][2][57] = 44, [1][1][RTW89_KCC][1][57] = -14, [1][1][RTW89_KCC][0][57] = 127, [1][1][RTW89_ACMA][1][57] = 127, @@ -53815,6 +56623,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][57] = 127, [1][1][RTW89_UK][1][57] = 127, [1][1][RTW89_UK][0][57] = 127, + [1][1][RTW89_THAILAND][1][57] = 127, + [1][1][RTW89_THAILAND][0][57] = 127, [1][1][RTW89_FCC][1][59] = -28, [1][1][RTW89_FCC][2][59] = 44, [1][1][RTW89_ETSI][1][59] = 127, @@ -53822,6 +56632,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][59] = 127, [1][1][RTW89_MKK][0][59] = 127, [1][1][RTW89_IC][1][59] = -28, + [1][1][RTW89_IC][2][59] = 44, [1][1][RTW89_KCC][1][59] = -14, [1][1][RTW89_KCC][0][59] = 127, [1][1][RTW89_ACMA][1][59] = 127, @@ -53831,6 +56642,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][59] = 127, [1][1][RTW89_UK][1][59] = 127, [1][1][RTW89_UK][0][59] = 127, + [1][1][RTW89_THAILAND][1][59] = 127, + [1][1][RTW89_THAILAND][0][59] = 127, [1][1][RTW89_FCC][1][60] = -28, [1][1][RTW89_FCC][2][60] = 44, [1][1][RTW89_ETSI][1][60] = 127, @@ -53838,6 +56651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][60] = 127, [1][1][RTW89_MKK][0][60] = 127, [1][1][RTW89_IC][1][60] = -28, + [1][1][RTW89_IC][2][60] = 44, [1][1][RTW89_KCC][1][60] = -14, [1][1][RTW89_KCC][0][60] = 127, [1][1][RTW89_ACMA][1][60] = 127, @@ -53847,6 +56661,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][60] = 127, [1][1][RTW89_UK][1][60] = 127, [1][1][RTW89_UK][0][60] = 127, + [1][1][RTW89_THAILAND][1][60] = 127, + [1][1][RTW89_THAILAND][0][60] = 127, [1][1][RTW89_FCC][1][62] = -28, [1][1][RTW89_FCC][2][62] = 44, [1][1][RTW89_ETSI][1][62] = 127, @@ -53854,6 +56670,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][62] = 127, [1][1][RTW89_MKK][0][62] = 127, [1][1][RTW89_IC][1][62] = -28, + [1][1][RTW89_IC][2][62] = 44, [1][1][RTW89_KCC][1][62] = -14, [1][1][RTW89_KCC][0][62] = 127, [1][1][RTW89_ACMA][1][62] = 127, @@ -53863,6 +56680,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][62] = 127, [1][1][RTW89_UK][1][62] = 127, [1][1][RTW89_UK][0][62] = 127, + [1][1][RTW89_THAILAND][1][62] = 127, + [1][1][RTW89_THAILAND][0][62] = 127, [1][1][RTW89_FCC][1][64] = -28, [1][1][RTW89_FCC][2][64] = 44, [1][1][RTW89_ETSI][1][64] = 127, @@ -53870,6 +56689,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][64] = 127, [1][1][RTW89_MKK][0][64] = 127, [1][1][RTW89_IC][1][64] = -28, + [1][1][RTW89_IC][2][64] = 44, [1][1][RTW89_KCC][1][64] = -14, [1][1][RTW89_KCC][0][64] = 127, [1][1][RTW89_ACMA][1][64] = 127, @@ -53879,6 +56699,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][64] = 127, [1][1][RTW89_UK][1][64] = 127, [1][1][RTW89_UK][0][64] = 127, + [1][1][RTW89_THAILAND][1][64] = 127, + [1][1][RTW89_THAILAND][0][64] = 127, [1][1][RTW89_FCC][1][66] = -28, [1][1][RTW89_FCC][2][66] = 44, [1][1][RTW89_ETSI][1][66] = 127, @@ -53886,6 +56708,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][66] = 127, [1][1][RTW89_MKK][0][66] = 127, [1][1][RTW89_IC][1][66] = -28, + [1][1][RTW89_IC][2][66] = 44, [1][1][RTW89_KCC][1][66] = -14, [1][1][RTW89_KCC][0][66] = 127, [1][1][RTW89_ACMA][1][66] = 127, @@ -53895,6 +56718,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][66] = 127, [1][1][RTW89_UK][1][66] = 127, [1][1][RTW89_UK][0][66] = 127, + [1][1][RTW89_THAILAND][1][66] = 127, + [1][1][RTW89_THAILAND][0][66] = 127, [1][1][RTW89_FCC][1][68] = -28, [1][1][RTW89_FCC][2][68] = 44, [1][1][RTW89_ETSI][1][68] = 127, @@ -53902,6 +56727,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][68] = 127, [1][1][RTW89_MKK][0][68] = 127, [1][1][RTW89_IC][1][68] = -28, + [1][1][RTW89_IC][2][68] = 44, 
[1][1][RTW89_KCC][1][68] = -14, [1][1][RTW89_KCC][0][68] = 127, [1][1][RTW89_ACMA][1][68] = 127, @@ -53911,6 +56737,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][68] = 127, [1][1][RTW89_UK][1][68] = 127, [1][1][RTW89_UK][0][68] = 127, + [1][1][RTW89_THAILAND][1][68] = 127, + [1][1][RTW89_THAILAND][0][68] = 127, [1][1][RTW89_FCC][1][70] = -26, [1][1][RTW89_FCC][2][70] = 44, [1][1][RTW89_ETSI][1][70] = 127, @@ -53918,6 +56746,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][70] = 127, [1][1][RTW89_MKK][0][70] = 127, [1][1][RTW89_IC][1][70] = -26, + [1][1][RTW89_IC][2][70] = 44, [1][1][RTW89_KCC][1][70] = -14, [1][1][RTW89_KCC][0][70] = 127, [1][1][RTW89_ACMA][1][70] = 127, @@ -53927,6 +56756,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][70] = 127, [1][1][RTW89_UK][1][70] = 127, [1][1][RTW89_UK][0][70] = 127, + [1][1][RTW89_THAILAND][1][70] = 127, + [1][1][RTW89_THAILAND][0][70] = 127, [1][1][RTW89_FCC][1][72] = -28, [1][1][RTW89_FCC][2][72] = 44, [1][1][RTW89_ETSI][1][72] = 127, @@ -53934,6 +56765,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][72] = 127, [1][1][RTW89_MKK][0][72] = 127, [1][1][RTW89_IC][1][72] = -28, + [1][1][RTW89_IC][2][72] = 44, [1][1][RTW89_KCC][1][72] = -14, [1][1][RTW89_KCC][0][72] = 127, [1][1][RTW89_ACMA][1][72] = 127, @@ -53943,6 +56775,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][72] = 127, [1][1][RTW89_UK][1][72] = 127, [1][1][RTW89_UK][0][72] = 127, + [1][1][RTW89_THAILAND][1][72] = 127, + [1][1][RTW89_THAILAND][0][72] = 127, [1][1][RTW89_FCC][1][74] = -28, [1][1][RTW89_FCC][2][74] = 44, [1][1][RTW89_ETSI][1][74] = 127, @@ -53950,6 +56784,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][74] = 127, [1][1][RTW89_MKK][0][74] = 127, [1][1][RTW89_IC][1][74] = -28, + [1][1][RTW89_IC][2][74] = 44, [1][1][RTW89_KCC][1][74] = -14, [1][1][RTW89_KCC][0][74] = 127, [1][1][RTW89_ACMA][1][74] = 127, @@ -53959,6 +56794,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][74] = 127, [1][1][RTW89_UK][1][74] = 127, [1][1][RTW89_UK][0][74] = 127, + [1][1][RTW89_THAILAND][1][74] = 127, + [1][1][RTW89_THAILAND][0][74] = 127, [1][1][RTW89_FCC][1][75] = -28, [1][1][RTW89_FCC][2][75] = 44, [1][1][RTW89_ETSI][1][75] = 127, @@ -53966,6 +56803,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][75] = 127, [1][1][RTW89_MKK][0][75] = 127, [1][1][RTW89_IC][1][75] = -28, + [1][1][RTW89_IC][2][75] = 44, [1][1][RTW89_KCC][1][75] = -14, [1][1][RTW89_KCC][0][75] = 127, [1][1][RTW89_ACMA][1][75] = 127, @@ -53975,6 +56813,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][75] = 127, [1][1][RTW89_UK][1][75] = 127, [1][1][RTW89_UK][0][75] = 127, + [1][1][RTW89_THAILAND][1][75] = 127, + [1][1][RTW89_THAILAND][0][75] = 127, [1][1][RTW89_FCC][1][77] = -28, [1][1][RTW89_FCC][2][77] = 44, [1][1][RTW89_ETSI][1][77] = 127, @@ -53982,6 +56822,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][77] = 127, [1][1][RTW89_MKK][0][77] = 127, [1][1][RTW89_IC][1][77] = -28, + [1][1][RTW89_IC][2][77] = 44, [1][1][RTW89_KCC][1][77] = -14, [1][1][RTW89_KCC][0][77] = 127, [1][1][RTW89_ACMA][1][77] = 127, @@ -53991,6 +56832,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][77] = 127, [1][1][RTW89_UK][1][77] = 127, [1][1][RTW89_UK][0][77] = 127, + [1][1][RTW89_THAILAND][1][77] = 127, + [1][1][RTW89_THAILAND][0][77] = 127, [1][1][RTW89_FCC][1][79] = -28, [1][1][RTW89_FCC][2][79] = 44, [1][1][RTW89_ETSI][1][79] = 127, @@ -53998,6 +56841,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][79] = 127, [1][1][RTW89_MKK][0][79] = 127, [1][1][RTW89_IC][1][79] = -28, + [1][1][RTW89_IC][2][79] = 44, [1][1][RTW89_KCC][1][79] = -14, [1][1][RTW89_KCC][0][79] = 127, [1][1][RTW89_ACMA][1][79] = 127, @@ -54007,6 +56851,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][79] = 127, [1][1][RTW89_UK][1][79] = 127, [1][1][RTW89_UK][0][79] = 127, + [1][1][RTW89_THAILAND][1][79] = 127, + [1][1][RTW89_THAILAND][0][79] = 127, [1][1][RTW89_FCC][1][81] = -28, [1][1][RTW89_FCC][2][81] = 44, [1][1][RTW89_ETSI][1][81] = 127, @@ -54014,6 +56860,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][81] = 127, [1][1][RTW89_MKK][0][81] = 127, [1][1][RTW89_IC][1][81] = -28, + [1][1][RTW89_IC][2][81] = 44, [1][1][RTW89_KCC][1][81] = -14, [1][1][RTW89_KCC][0][81] = 127, [1][1][RTW89_ACMA][1][81] = 127, @@ -54023,6 +56870,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][81] = 127, [1][1][RTW89_UK][1][81] = 127, [1][1][RTW89_UK][0][81] = 127, + [1][1][RTW89_THAILAND][1][81] = 127, + [1][1][RTW89_THAILAND][0][81] = 127, [1][1][RTW89_FCC][1][83] = -28, [1][1][RTW89_FCC][2][83] = 44, [1][1][RTW89_ETSI][1][83] = 127, @@ -54030,6 +56879,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][83] = 127, [1][1][RTW89_MKK][0][83] = 127, [1][1][RTW89_IC][1][83] = -28, + [1][1][RTW89_IC][2][83] = 44, [1][1][RTW89_KCC][1][83] = -14, [1][1][RTW89_KCC][0][83] = 127, [1][1][RTW89_ACMA][1][83] = 127, @@ -54039,6 +56889,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][83] = 127, [1][1][RTW89_UK][1][83] = 127, [1][1][RTW89_UK][0][83] = 127, + [1][1][RTW89_THAILAND][1][83] = 127, + [1][1][RTW89_THAILAND][0][83] = 127, [1][1][RTW89_FCC][1][85] = -28, [1][1][RTW89_FCC][2][85] = 44, [1][1][RTW89_ETSI][1][85] = 127, @@ -54046,6 +56898,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][85] = 127, [1][1][RTW89_MKK][0][85] = 127, [1][1][RTW89_IC][1][85] = -28, + [1][1][RTW89_IC][2][85] = 44, [1][1][RTW89_KCC][1][85] = -14, [1][1][RTW89_KCC][0][85] = 127, [1][1][RTW89_ACMA][1][85] = 127, @@ -54055,6 +56908,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][85] = 127, [1][1][RTW89_UK][1][85] = 127, [1][1][RTW89_UK][0][85] = 127, + [1][1][RTW89_THAILAND][1][85] = 127, + [1][1][RTW89_THAILAND][0][85] = 127, [1][1][RTW89_FCC][1][87] = -28, [1][1][RTW89_FCC][2][87] = 127, [1][1][RTW89_ETSI][1][87] = 127, @@ -54062,6 +56917,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][87] = 127, [1][1][RTW89_MKK][0][87] = 127, [1][1][RTW89_IC][1][87] = -28, + [1][1][RTW89_IC][2][87] = 127, [1][1][RTW89_KCC][1][87] = -14, [1][1][RTW89_KCC][0][87] = 127, [1][1][RTW89_ACMA][1][87] = 127, @@ -54071,6 +56927,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][87] = 127, [1][1][RTW89_UK][1][87] = 127, [1][1][RTW89_UK][0][87] = 127, + 
[1][1][RTW89_THAILAND][1][87] = 127, + [1][1][RTW89_THAILAND][0][87] = 127, [1][1][RTW89_FCC][1][89] = -26, [1][1][RTW89_FCC][2][89] = 127, [1][1][RTW89_ETSI][1][89] = 127, @@ -54078,6 +56936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][89] = 127, [1][1][RTW89_MKK][0][89] = 127, [1][1][RTW89_IC][1][89] = -26, + [1][1][RTW89_IC][2][89] = 127, [1][1][RTW89_KCC][1][89] = -14, [1][1][RTW89_KCC][0][89] = 127, [1][1][RTW89_ACMA][1][89] = 127, @@ -54087,6 +56946,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][89] = 127, [1][1][RTW89_UK][1][89] = 127, [1][1][RTW89_UK][0][89] = 127, + [1][1][RTW89_THAILAND][1][89] = 127, + [1][1][RTW89_THAILAND][0][89] = 127, [1][1][RTW89_FCC][1][90] = -26, [1][1][RTW89_FCC][2][90] = 127, [1][1][RTW89_ETSI][1][90] = 127, @@ -54094,6 +56955,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][90] = 127, [1][1][RTW89_MKK][0][90] = 127, [1][1][RTW89_IC][1][90] = -26, + [1][1][RTW89_IC][2][90] = 127, [1][1][RTW89_KCC][1][90] = -14, [1][1][RTW89_KCC][0][90] = 127, [1][1][RTW89_ACMA][1][90] = 127, @@ -54103,6 +56965,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][90] = 127, [1][1][RTW89_UK][1][90] = 127, [1][1][RTW89_UK][0][90] = 127, + [1][1][RTW89_THAILAND][1][90] = 127, + [1][1][RTW89_THAILAND][0][90] = 127, [1][1][RTW89_FCC][1][92] = -26, [1][1][RTW89_FCC][2][92] = 127, [1][1][RTW89_ETSI][1][92] = 127, @@ -54110,6 +56974,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][92] = 127, [1][1][RTW89_MKK][0][92] = 127, [1][1][RTW89_IC][1][92] = -26, + [1][1][RTW89_IC][2][92] = 127, [1][1][RTW89_KCC][1][92] = -14, [1][1][RTW89_KCC][0][92] = 127, [1][1][RTW89_ACMA][1][92] = 127, @@ -54119,6 +56984,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][92] = 127, [1][1][RTW89_UK][1][92] = 127, [1][1][RTW89_UK][0][92] = 127, + [1][1][RTW89_THAILAND][1][92] = 127, + [1][1][RTW89_THAILAND][0][92] = 127, [1][1][RTW89_FCC][1][94] = -26, [1][1][RTW89_FCC][2][94] = 127, [1][1][RTW89_ETSI][1][94] = 127, @@ -54126,6 +56993,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][94] = 127, [1][1][RTW89_MKK][0][94] = 127, [1][1][RTW89_IC][1][94] = -26, + [1][1][RTW89_IC][2][94] = 127, [1][1][RTW89_KCC][1][94] = -14, [1][1][RTW89_KCC][0][94] = 127, [1][1][RTW89_ACMA][1][94] = 127, @@ -54135,6 +57003,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][94] = 127, [1][1][RTW89_UK][1][94] = 127, [1][1][RTW89_UK][0][94] = 127, + [1][1][RTW89_THAILAND][1][94] = 127, + [1][1][RTW89_THAILAND][0][94] = 127, [1][1][RTW89_FCC][1][96] = -26, [1][1][RTW89_FCC][2][96] = 127, [1][1][RTW89_ETSI][1][96] = 127, @@ -54142,6 +57012,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][96] = 127, [1][1][RTW89_MKK][0][96] = 127, [1][1][RTW89_IC][1][96] = -26, + [1][1][RTW89_IC][2][96] = 127, [1][1][RTW89_KCC][1][96] = -14, [1][1][RTW89_KCC][0][96] = 127, [1][1][RTW89_ACMA][1][96] = 127, @@ -54151,6 +57022,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][96] = 127, [1][1][RTW89_UK][1][96] = 127, [1][1][RTW89_UK][0][96] = 127, + [1][1][RTW89_THAILAND][1][96] = 127, + [1][1][RTW89_THAILAND][0][96] = 127, [1][1][RTW89_FCC][1][98] = -26, [1][1][RTW89_FCC][2][98] = 127, [1][1][RTW89_ETSI][1][98] = 127, 
@@ -54158,6 +57031,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][98] = 127, [1][1][RTW89_MKK][0][98] = 127, [1][1][RTW89_IC][1][98] = -26, + [1][1][RTW89_IC][2][98] = 127, [1][1][RTW89_KCC][1][98] = -14, [1][1][RTW89_KCC][0][98] = 127, [1][1][RTW89_ACMA][1][98] = 127, @@ -54167,6 +57041,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][98] = 127, [1][1][RTW89_UK][1][98] = 127, [1][1][RTW89_UK][0][98] = 127, + [1][1][RTW89_THAILAND][1][98] = 127, + [1][1][RTW89_THAILAND][0][98] = 127, [1][1][RTW89_FCC][1][100] = -26, [1][1][RTW89_FCC][2][100] = 127, [1][1][RTW89_ETSI][1][100] = 127, @@ -54174,6 +57050,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][100] = 127, [1][1][RTW89_MKK][0][100] = 127, [1][1][RTW89_IC][1][100] = -26, + [1][1][RTW89_IC][2][100] = 127, [1][1][RTW89_KCC][1][100] = -14, [1][1][RTW89_KCC][0][100] = 127, [1][1][RTW89_ACMA][1][100] = 127, @@ -54183,6 +57060,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][100] = 127, [1][1][RTW89_UK][1][100] = 127, [1][1][RTW89_UK][0][100] = 127, + [1][1][RTW89_THAILAND][1][100] = 127, + [1][1][RTW89_THAILAND][0][100] = 127, [1][1][RTW89_FCC][1][102] = -26, [1][1][RTW89_FCC][2][102] = 127, [1][1][RTW89_ETSI][1][102] = 127, @@ -54190,6 +57069,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][102] = 127, [1][1][RTW89_MKK][0][102] = 127, [1][1][RTW89_IC][1][102] = -26, + [1][1][RTW89_IC][2][102] = 127, [1][1][RTW89_KCC][1][102] = -14, [1][1][RTW89_KCC][0][102] = 127, [1][1][RTW89_ACMA][1][102] = 127, @@ -54199,6 +57079,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][102] = 127, [1][1][RTW89_UK][1][102] = 127, [1][1][RTW89_UK][0][102] = 127, + [1][1][RTW89_THAILAND][1][102] = 127, + [1][1][RTW89_THAILAND][0][102] = 127, [1][1][RTW89_FCC][1][104] = -26, [1][1][RTW89_FCC][2][104] = 127, [1][1][RTW89_ETSI][1][104] = 127, @@ -54206,6 +57088,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][104] = 127, [1][1][RTW89_MKK][0][104] = 127, [1][1][RTW89_IC][1][104] = -26, + [1][1][RTW89_IC][2][104] = 127, [1][1][RTW89_KCC][1][104] = -14, [1][1][RTW89_KCC][0][104] = 127, [1][1][RTW89_ACMA][1][104] = 127, @@ -54215,6 +57098,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][104] = 127, [1][1][RTW89_UK][1][104] = 127, [1][1][RTW89_UK][0][104] = 127, + [1][1][RTW89_THAILAND][1][104] = 127, + [1][1][RTW89_THAILAND][0][104] = 127, [1][1][RTW89_FCC][1][105] = -26, [1][1][RTW89_FCC][2][105] = 127, [1][1][RTW89_ETSI][1][105] = 127, @@ -54222,6 +57107,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][105] = 127, [1][1][RTW89_MKK][0][105] = 127, [1][1][RTW89_IC][1][105] = -26, + [1][1][RTW89_IC][2][105] = 127, [1][1][RTW89_KCC][1][105] = -14, [1][1][RTW89_KCC][0][105] = 127, [1][1][RTW89_ACMA][1][105] = 127, @@ -54231,6 +57117,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][105] = 127, [1][1][RTW89_UK][1][105] = 127, [1][1][RTW89_UK][0][105] = 127, + [1][1][RTW89_THAILAND][1][105] = 127, + [1][1][RTW89_THAILAND][0][105] = 127, [1][1][RTW89_FCC][1][107] = -22, [1][1][RTW89_FCC][2][107] = 127, [1][1][RTW89_ETSI][1][107] = 127, @@ -54238,6 +57126,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][1][RTW89_MKK][1][107] = 127, [1][1][RTW89_MKK][0][107] = 127, [1][1][RTW89_IC][1][107] = -22, + [1][1][RTW89_IC][2][107] = 127, [1][1][RTW89_KCC][1][107] = -14, [1][1][RTW89_KCC][0][107] = 127, [1][1][RTW89_ACMA][1][107] = 127, @@ -54247,6 +57136,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][107] = 127, [1][1][RTW89_UK][1][107] = 127, [1][1][RTW89_UK][0][107] = 127, + [1][1][RTW89_THAILAND][1][107] = 127, + [1][1][RTW89_THAILAND][0][107] = 127, [1][1][RTW89_FCC][1][109] = -22, [1][1][RTW89_FCC][2][109] = 127, [1][1][RTW89_ETSI][1][109] = 127, @@ -54254,6 +57145,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][109] = 127, [1][1][RTW89_MKK][0][109] = 127, [1][1][RTW89_IC][1][109] = -22, + [1][1][RTW89_IC][2][109] = 127, [1][1][RTW89_KCC][1][109] = 127, [1][1][RTW89_KCC][0][109] = 127, [1][1][RTW89_ACMA][1][109] = 127, @@ -54263,6 +57155,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][109] = 127, [1][1][RTW89_UK][1][109] = 127, [1][1][RTW89_UK][0][109] = 127, + [1][1][RTW89_THAILAND][1][109] = 127, + [1][1][RTW89_THAILAND][0][109] = 127, [1][1][RTW89_FCC][1][111] = 127, [1][1][RTW89_FCC][2][111] = 127, [1][1][RTW89_ETSI][1][111] = 127, @@ -54270,6 +57164,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][111] = 127, [1][1][RTW89_MKK][0][111] = 127, [1][1][RTW89_IC][1][111] = 127, + [1][1][RTW89_IC][2][111] = 127, [1][1][RTW89_KCC][1][111] = 127, [1][1][RTW89_KCC][0][111] = 127, [1][1][RTW89_ACMA][1][111] = 127, @@ -54279,6 +57174,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][111] = 127, [1][1][RTW89_UK][1][111] = 127, [1][1][RTW89_UK][0][111] = 127, + [1][1][RTW89_THAILAND][1][111] = 127, + [1][1][RTW89_THAILAND][0][111] = 127, [1][1][RTW89_FCC][1][113] = 127, [1][1][RTW89_FCC][2][113] = 127, [1][1][RTW89_ETSI][1][113] = 127, @@ -54286,6 +57183,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][113] = 127, [1][1][RTW89_MKK][0][113] = 127, [1][1][RTW89_IC][1][113] = 127, + [1][1][RTW89_IC][2][113] = 127, [1][1][RTW89_KCC][1][113] = 127, [1][1][RTW89_KCC][0][113] = 127, [1][1][RTW89_ACMA][1][113] = 127, @@ -54295,6 +57193,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][113] = 127, [1][1][RTW89_UK][1][113] = 127, [1][1][RTW89_UK][0][113] = 127, + [1][1][RTW89_THAILAND][1][113] = 127, + [1][1][RTW89_THAILAND][0][113] = 127, [1][1][RTW89_FCC][1][115] = 127, [1][1][RTW89_FCC][2][115] = 127, [1][1][RTW89_ETSI][1][115] = 127, @@ -54302,6 +57202,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][115] = 127, [1][1][RTW89_MKK][0][115] = 127, [1][1][RTW89_IC][1][115] = 127, + [1][1][RTW89_IC][2][115] = 127, [1][1][RTW89_KCC][1][115] = 127, [1][1][RTW89_KCC][0][115] = 127, [1][1][RTW89_ACMA][1][115] = 127, @@ -54311,6 +57212,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][115] = 127, [1][1][RTW89_UK][1][115] = 127, [1][1][RTW89_UK][0][115] = 127, + [1][1][RTW89_THAILAND][1][115] = 127, + [1][1][RTW89_THAILAND][0][115] = 127, [1][1][RTW89_FCC][1][117] = 127, [1][1][RTW89_FCC][2][117] = 127, [1][1][RTW89_ETSI][1][117] = 127, @@ -54318,6 +57221,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][117] = 127, [1][1][RTW89_MKK][0][117] = 127, [1][1][RTW89_IC][1][117] = 127, + 
[1][1][RTW89_IC][2][117] = 127, [1][1][RTW89_KCC][1][117] = 127, [1][1][RTW89_KCC][0][117] = 127, [1][1][RTW89_ACMA][1][117] = 127, @@ -54327,6 +57231,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][117] = 127, [1][1][RTW89_UK][1][117] = 127, [1][1][RTW89_UK][0][117] = 127, + [1][1][RTW89_THAILAND][1][117] = 127, + [1][1][RTW89_THAILAND][0][117] = 127, [1][1][RTW89_FCC][1][119] = 127, [1][1][RTW89_FCC][2][119] = 127, [1][1][RTW89_ETSI][1][119] = 127, @@ -54334,6 +57240,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][119] = 127, [1][1][RTW89_MKK][0][119] = 127, [1][1][RTW89_IC][1][119] = 127, + [1][1][RTW89_IC][2][119] = 127, [1][1][RTW89_KCC][1][119] = 127, [1][1][RTW89_KCC][0][119] = 127, [1][1][RTW89_ACMA][1][119] = 127, @@ -54343,6 +57250,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][119] = 127, [1][1][RTW89_UK][1][119] = 127, [1][1][RTW89_UK][0][119] = 127, + [1][1][RTW89_THAILAND][1][119] = 127, + [1][1][RTW89_THAILAND][0][119] = 127, [2][0][RTW89_FCC][1][0] = 8, [2][0][RTW89_FCC][2][0] = 60, [2][0][RTW89_ETSI][1][0] = 56, @@ -54350,6 +57259,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][0] = 54, [2][0][RTW89_MKK][0][0] = 14, [2][0][RTW89_IC][1][0] = 8, + [2][0][RTW89_IC][2][0] = 60, [2][0][RTW89_KCC][1][0] = -2, [2][0][RTW89_KCC][0][0] = -2, [2][0][RTW89_ACMA][1][0] = 56, @@ -54359,6 +57269,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][0] = 18, [2][0][RTW89_UK][1][0] = 56, [2][0][RTW89_UK][0][0] = 18, + [2][0][RTW89_THAILAND][1][0] = 52, + [2][0][RTW89_THAILAND][0][0] = 8, [2][0][RTW89_FCC][1][2] = 8, [2][0][RTW89_FCC][2][2] = 60, [2][0][RTW89_ETSI][1][2] = 56, @@ -54366,6 +57278,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][2] = 54, [2][0][RTW89_MKK][0][2] = 14, [2][0][RTW89_IC][1][2] = 8, + [2][0][RTW89_IC][2][2] = 60, [2][0][RTW89_KCC][1][2] = -2, [2][0][RTW89_KCC][0][2] = -2, [2][0][RTW89_ACMA][1][2] = 56, @@ -54375,6 +57288,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][2] = 18, [2][0][RTW89_UK][1][2] = 56, [2][0][RTW89_UK][0][2] = 18, + [2][0][RTW89_THAILAND][1][2] = 52, + [2][0][RTW89_THAILAND][0][2] = 8, [2][0][RTW89_FCC][1][4] = 8, [2][0][RTW89_FCC][2][4] = 60, [2][0][RTW89_ETSI][1][4] = 56, @@ -54382,6 +57297,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][4] = 54, [2][0][RTW89_MKK][0][4] = 14, [2][0][RTW89_IC][1][4] = 8, + [2][0][RTW89_IC][2][4] = 60, [2][0][RTW89_KCC][1][4] = -2, [2][0][RTW89_KCC][0][4] = -2, [2][0][RTW89_ACMA][1][4] = 56, @@ -54391,6 +57307,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][4] = 18, [2][0][RTW89_UK][1][4] = 56, [2][0][RTW89_UK][0][4] = 18, + [2][0][RTW89_THAILAND][1][4] = 52, + [2][0][RTW89_THAILAND][0][4] = 8, [2][0][RTW89_FCC][1][6] = 8, [2][0][RTW89_FCC][2][6] = 60, [2][0][RTW89_ETSI][1][6] = 56, @@ -54398,6 +57316,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][6] = 54, [2][0][RTW89_MKK][0][6] = 14, [2][0][RTW89_IC][1][6] = 8, + [2][0][RTW89_IC][2][6] = 60, [2][0][RTW89_KCC][1][6] = -2, [2][0][RTW89_KCC][0][6] = -2, [2][0][RTW89_ACMA][1][6] = 56, @@ -54407,6 +57326,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][6] = 18, 
[2][0][RTW89_UK][1][6] = 56, [2][0][RTW89_UK][0][6] = 18, + [2][0][RTW89_THAILAND][1][6] = 52, + [2][0][RTW89_THAILAND][0][6] = 8, [2][0][RTW89_FCC][1][8] = 8, [2][0][RTW89_FCC][2][8] = 60, [2][0][RTW89_ETSI][1][8] = 56, @@ -54414,6 +57335,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][8] = 54, [2][0][RTW89_MKK][0][8] = 14, [2][0][RTW89_IC][1][8] = 8, + [2][0][RTW89_IC][2][8] = 60, [2][0][RTW89_KCC][1][8] = -2, [2][0][RTW89_KCC][0][8] = -2, [2][0][RTW89_ACMA][1][8] = 56, @@ -54423,6 +57345,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][8] = 18, [2][0][RTW89_UK][1][8] = 56, [2][0][RTW89_UK][0][8] = 18, + [2][0][RTW89_THAILAND][1][8] = 52, + [2][0][RTW89_THAILAND][0][8] = 8, [2][0][RTW89_FCC][1][10] = 8, [2][0][RTW89_FCC][2][10] = 60, [2][0][RTW89_ETSI][1][10] = 56, @@ -54430,6 +57354,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][10] = 54, [2][0][RTW89_MKK][0][10] = 14, [2][0][RTW89_IC][1][10] = 8, + [2][0][RTW89_IC][2][10] = 60, [2][0][RTW89_KCC][1][10] = -2, [2][0][RTW89_KCC][0][10] = -2, [2][0][RTW89_ACMA][1][10] = 56, @@ -54439,6 +57364,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][10] = 18, [2][0][RTW89_UK][1][10] = 56, [2][0][RTW89_UK][0][10] = 18, + [2][0][RTW89_THAILAND][1][10] = 52, + [2][0][RTW89_THAILAND][0][10] = 8, [2][0][RTW89_FCC][1][12] = 8, [2][0][RTW89_FCC][2][12] = 60, [2][0][RTW89_ETSI][1][12] = 56, @@ -54446,6 +57373,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][12] = 54, [2][0][RTW89_MKK][0][12] = 14, [2][0][RTW89_IC][1][12] = 8, + [2][0][RTW89_IC][2][12] = 60, [2][0][RTW89_KCC][1][12] = -2, [2][0][RTW89_KCC][0][12] = -2, [2][0][RTW89_ACMA][1][12] = 56, @@ -54455,6 +57383,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][12] = 18, [2][0][RTW89_UK][1][12] = 56, [2][0][RTW89_UK][0][12] = 18, + [2][0][RTW89_THAILAND][1][12] = 52, + [2][0][RTW89_THAILAND][0][12] = 8, [2][0][RTW89_FCC][1][14] = 8, [2][0][RTW89_FCC][2][14] = 60, [2][0][RTW89_ETSI][1][14] = 56, @@ -54462,6 +57392,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][14] = 54, [2][0][RTW89_MKK][0][14] = 14, [2][0][RTW89_IC][1][14] = 8, + [2][0][RTW89_IC][2][14] = 60, [2][0][RTW89_KCC][1][14] = -2, [2][0][RTW89_KCC][0][14] = -2, [2][0][RTW89_ACMA][1][14] = 56, @@ -54471,6 +57402,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][14] = 18, [2][0][RTW89_UK][1][14] = 56, [2][0][RTW89_UK][0][14] = 18, + [2][0][RTW89_THAILAND][1][14] = 52, + [2][0][RTW89_THAILAND][0][14] = 8, [2][0][RTW89_FCC][1][15] = 8, [2][0][RTW89_FCC][2][15] = 60, [2][0][RTW89_ETSI][1][15] = 56, @@ -54478,6 +57411,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][15] = 54, [2][0][RTW89_MKK][0][15] = 14, [2][0][RTW89_IC][1][15] = 8, + [2][0][RTW89_IC][2][15] = 60, [2][0][RTW89_KCC][1][15] = -2, [2][0][RTW89_KCC][0][15] = -2, [2][0][RTW89_ACMA][1][15] = 56, @@ -54487,6 +57421,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][15] = 18, [2][0][RTW89_UK][1][15] = 56, [2][0][RTW89_UK][0][15] = 18, + [2][0][RTW89_THAILAND][1][15] = 52, + [2][0][RTW89_THAILAND][0][15] = 8, [2][0][RTW89_FCC][1][17] = 8, [2][0][RTW89_FCC][2][17] = 60, [2][0][RTW89_ETSI][1][17] = 56, @@ -54494,6 +57430,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][17] = 54, [2][0][RTW89_MKK][0][17] = 14, [2][0][RTW89_IC][1][17] = 8, + [2][0][RTW89_IC][2][17] = 60, [2][0][RTW89_KCC][1][17] = -2, [2][0][RTW89_KCC][0][17] = -2, [2][0][RTW89_ACMA][1][17] = 56, @@ -54503,6 +57440,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][17] = 18, [2][0][RTW89_UK][1][17] = 56, [2][0][RTW89_UK][0][17] = 18, + [2][0][RTW89_THAILAND][1][17] = 52, + [2][0][RTW89_THAILAND][0][17] = 8, [2][0][RTW89_FCC][1][19] = 8, [2][0][RTW89_FCC][2][19] = 60, [2][0][RTW89_ETSI][1][19] = 56, @@ -54510,6 +57449,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][19] = 54, [2][0][RTW89_MKK][0][19] = 14, [2][0][RTW89_IC][1][19] = 8, + [2][0][RTW89_IC][2][19] = 60, [2][0][RTW89_KCC][1][19] = -2, [2][0][RTW89_KCC][0][19] = -2, [2][0][RTW89_ACMA][1][19] = 56, @@ -54519,6 +57459,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][19] = 18, [2][0][RTW89_UK][1][19] = 56, [2][0][RTW89_UK][0][19] = 18, + [2][0][RTW89_THAILAND][1][19] = 52, + [2][0][RTW89_THAILAND][0][19] = 8, [2][0][RTW89_FCC][1][21] = 8, [2][0][RTW89_FCC][2][21] = 60, [2][0][RTW89_ETSI][1][21] = 56, @@ -54526,6 +57468,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][21] = 54, [2][0][RTW89_MKK][0][21] = 14, [2][0][RTW89_IC][1][21] = 8, + [2][0][RTW89_IC][2][21] = 60, [2][0][RTW89_KCC][1][21] = -2, [2][0][RTW89_KCC][0][21] = -2, [2][0][RTW89_ACMA][1][21] = 56, @@ -54535,13 +57478,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][21] = 18, [2][0][RTW89_UK][1][21] = 56, [2][0][RTW89_UK][0][21] = 18, + [2][0][RTW89_THAILAND][1][21] = 52, + [2][0][RTW89_THAILAND][0][21] = 8, [2][0][RTW89_FCC][1][23] = 8, - [2][0][RTW89_FCC][2][23] = 78, + [2][0][RTW89_FCC][2][23] = 70, [2][0][RTW89_ETSI][1][23] = 56, [2][0][RTW89_ETSI][0][23] = 18, [2][0][RTW89_MKK][1][23] = 56, [2][0][RTW89_MKK][0][23] = 14, [2][0][RTW89_IC][1][23] = 8, + [2][0][RTW89_IC][2][23] = 70, [2][0][RTW89_KCC][1][23] = -2, [2][0][RTW89_KCC][0][23] = -2, [2][0][RTW89_ACMA][1][23] = 56, @@ -54551,13 +57497,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][23] = 18, [2][0][RTW89_UK][1][23] = 56, [2][0][RTW89_UK][0][23] = 18, + [2][0][RTW89_THAILAND][1][23] = 52, + [2][0][RTW89_THAILAND][0][23] = 8, [2][0][RTW89_FCC][1][25] = 8, - [2][0][RTW89_FCC][2][25] = 78, + [2][0][RTW89_FCC][2][25] = 70, [2][0][RTW89_ETSI][1][25] = 56, [2][0][RTW89_ETSI][0][25] = 18, [2][0][RTW89_MKK][1][25] = 56, [2][0][RTW89_MKK][0][25] = 14, [2][0][RTW89_IC][1][25] = 8, + [2][0][RTW89_IC][2][25] = 70, [2][0][RTW89_KCC][1][25] = -2, [2][0][RTW89_KCC][0][25] = -2, [2][0][RTW89_ACMA][1][25] = 56, @@ -54567,13 +57516,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][25] = 18, [2][0][RTW89_UK][1][25] = 56, [2][0][RTW89_UK][0][25] = 18, + [2][0][RTW89_THAILAND][1][25] = 52, + [2][0][RTW89_THAILAND][0][25] = 8, [2][0][RTW89_FCC][1][27] = 8, - [2][0][RTW89_FCC][2][27] = 78, + [2][0][RTW89_FCC][2][27] = 70, [2][0][RTW89_ETSI][1][27] = 56, [2][0][RTW89_ETSI][0][27] = 18, [2][0][RTW89_MKK][1][27] = 56, [2][0][RTW89_MKK][0][27] = 14, [2][0][RTW89_IC][1][27] = 8, + [2][0][RTW89_IC][2][27] = 70, [2][0][RTW89_KCC][1][27] = -2, [2][0][RTW89_KCC][0][27] = -2, [2][0][RTW89_ACMA][1][27] = 56, @@ -54583,13 +57535,16 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][27] = 18, [2][0][RTW89_UK][1][27] = 56, [2][0][RTW89_UK][0][27] = 18, + [2][0][RTW89_THAILAND][1][27] = 52, + [2][0][RTW89_THAILAND][0][27] = 8, [2][0][RTW89_FCC][1][29] = 8, - [2][0][RTW89_FCC][2][29] = 78, + [2][0][RTW89_FCC][2][29] = 70, [2][0][RTW89_ETSI][1][29] = 56, [2][0][RTW89_ETSI][0][29] = 18, [2][0][RTW89_MKK][1][29] = 56, [2][0][RTW89_MKK][0][29] = 14, [2][0][RTW89_IC][1][29] = 8, + [2][0][RTW89_IC][2][29] = 70, [2][0][RTW89_KCC][1][29] = -2, [2][0][RTW89_KCC][0][29] = -2, [2][0][RTW89_ACMA][1][29] = 56, @@ -54599,13 +57554,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][29] = 18, [2][0][RTW89_UK][1][29] = 56, [2][0][RTW89_UK][0][29] = 18, + [2][0][RTW89_THAILAND][1][29] = 52, + [2][0][RTW89_THAILAND][0][29] = 8, [2][0][RTW89_FCC][1][30] = 8, - [2][0][RTW89_FCC][2][30] = 78, + [2][0][RTW89_FCC][2][30] = 70, [2][0][RTW89_ETSI][1][30] = 56, [2][0][RTW89_ETSI][0][30] = 18, [2][0][RTW89_MKK][1][30] = 56, [2][0][RTW89_MKK][0][30] = 14, [2][0][RTW89_IC][1][30] = 8, + [2][0][RTW89_IC][2][30] = 70, [2][0][RTW89_KCC][1][30] = -2, [2][0][RTW89_KCC][0][30] = -2, [2][0][RTW89_ACMA][1][30] = 56, @@ -54615,13 +57573,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][30] = 18, [2][0][RTW89_UK][1][30] = 56, [2][0][RTW89_UK][0][30] = 18, + [2][0][RTW89_THAILAND][1][30] = 52, + [2][0][RTW89_THAILAND][0][30] = 8, [2][0][RTW89_FCC][1][32] = 8, - [2][0][RTW89_FCC][2][32] = 78, + [2][0][RTW89_FCC][2][32] = 70, [2][0][RTW89_ETSI][1][32] = 56, [2][0][RTW89_ETSI][0][32] = 18, [2][0][RTW89_MKK][1][32] = 56, [2][0][RTW89_MKK][0][32] = 14, [2][0][RTW89_IC][1][32] = 8, + [2][0][RTW89_IC][2][32] = 70, [2][0][RTW89_KCC][1][32] = -2, [2][0][RTW89_KCC][0][32] = -2, [2][0][RTW89_ACMA][1][32] = 56, @@ -54631,13 +57592,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][32] = 18, [2][0][RTW89_UK][1][32] = 56, [2][0][RTW89_UK][0][32] = 18, + [2][0][RTW89_THAILAND][1][32] = 52, + [2][0][RTW89_THAILAND][0][32] = 8, [2][0][RTW89_FCC][1][34] = 8, - [2][0][RTW89_FCC][2][34] = 78, + [2][0][RTW89_FCC][2][34] = 70, [2][0][RTW89_ETSI][1][34] = 56, [2][0][RTW89_ETSI][0][34] = 18, [2][0][RTW89_MKK][1][34] = 56, [2][0][RTW89_MKK][0][34] = 14, [2][0][RTW89_IC][1][34] = 8, + [2][0][RTW89_IC][2][34] = 70, [2][0][RTW89_KCC][1][34] = -2, [2][0][RTW89_KCC][0][34] = -2, [2][0][RTW89_ACMA][1][34] = 56, @@ -54647,13 +57611,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][34] = 18, [2][0][RTW89_UK][1][34] = 56, [2][0][RTW89_UK][0][34] = 18, + [2][0][RTW89_THAILAND][1][34] = 52, + [2][0][RTW89_THAILAND][0][34] = 8, [2][0][RTW89_FCC][1][36] = 8, - [2][0][RTW89_FCC][2][36] = 78, + [2][0][RTW89_FCC][2][36] = 70, [2][0][RTW89_ETSI][1][36] = 56, [2][0][RTW89_ETSI][0][36] = 18, [2][0][RTW89_MKK][1][36] = 56, [2][0][RTW89_MKK][0][36] = 14, [2][0][RTW89_IC][1][36] = 8, + [2][0][RTW89_IC][2][36] = 70, [2][0][RTW89_KCC][1][36] = -2, [2][0][RTW89_KCC][0][36] = -2, [2][0][RTW89_ACMA][1][36] = 56, @@ -54663,13 +57630,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][36] = 18, [2][0][RTW89_UK][1][36] = 56, [2][0][RTW89_UK][0][36] = 18, + [2][0][RTW89_THAILAND][1][36] = 52, + [2][0][RTW89_THAILAND][0][36] = 8, [2][0][RTW89_FCC][1][38] = 8, - [2][0][RTW89_FCC][2][38] = 78, + [2][0][RTW89_FCC][2][38] = 70, [2][0][RTW89_ETSI][1][38] = 56, 
[2][0][RTW89_ETSI][0][38] = 18, [2][0][RTW89_MKK][1][38] = 56, [2][0][RTW89_MKK][0][38] = 14, [2][0][RTW89_IC][1][38] = 8, + [2][0][RTW89_IC][2][38] = 70, [2][0][RTW89_KCC][1][38] = -2, [2][0][RTW89_KCC][0][38] = -2, [2][0][RTW89_ACMA][1][38] = 56, @@ -54679,13 +57649,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][38] = 18, [2][0][RTW89_UK][1][38] = 56, [2][0][RTW89_UK][0][38] = 18, + [2][0][RTW89_THAILAND][1][38] = 52, + [2][0][RTW89_THAILAND][0][38] = 8, [2][0][RTW89_FCC][1][40] = 8, - [2][0][RTW89_FCC][2][40] = 78, + [2][0][RTW89_FCC][2][40] = 70, [2][0][RTW89_ETSI][1][40] = 56, [2][0][RTW89_ETSI][0][40] = 18, [2][0][RTW89_MKK][1][40] = 56, [2][0][RTW89_MKK][0][40] = 14, [2][0][RTW89_IC][1][40] = 8, + [2][0][RTW89_IC][2][40] = 70, [2][0][RTW89_KCC][1][40] = -2, [2][0][RTW89_KCC][0][40] = -2, [2][0][RTW89_ACMA][1][40] = 56, @@ -54695,13 +57668,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][40] = 18, [2][0][RTW89_UK][1][40] = 56, [2][0][RTW89_UK][0][40] = 18, + [2][0][RTW89_THAILAND][1][40] = 52, + [2][0][RTW89_THAILAND][0][40] = 8, [2][0][RTW89_FCC][1][42] = 8, - [2][0][RTW89_FCC][2][42] = 78, + [2][0][RTW89_FCC][2][42] = 70, [2][0][RTW89_ETSI][1][42] = 56, [2][0][RTW89_ETSI][0][42] = 18, [2][0][RTW89_MKK][1][42] = 56, [2][0][RTW89_MKK][0][42] = 14, [2][0][RTW89_IC][1][42] = 8, + [2][0][RTW89_IC][2][42] = 70, [2][0][RTW89_KCC][1][42] = -2, [2][0][RTW89_KCC][0][42] = -2, [2][0][RTW89_ACMA][1][42] = 56, @@ -54711,13 +57687,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][42] = 18, [2][0][RTW89_UK][1][42] = 56, [2][0][RTW89_UK][0][42] = 18, + [2][0][RTW89_THAILAND][1][42] = 52, + [2][0][RTW89_THAILAND][0][42] = 8, [2][0][RTW89_FCC][1][44] = 8, - [2][0][RTW89_FCC][2][44] = 78, + [2][0][RTW89_FCC][2][44] = 70, [2][0][RTW89_ETSI][1][44] = 56, [2][0][RTW89_ETSI][0][44] = 18, [2][0][RTW89_MKK][1][44] = 32, [2][0][RTW89_MKK][0][44] = 14, [2][0][RTW89_IC][1][44] = 8, + [2][0][RTW89_IC][2][44] = 70, [2][0][RTW89_KCC][1][44] = -2, [2][0][RTW89_KCC][0][44] = -2, [2][0][RTW89_ACMA][1][44] = 56, @@ -54727,6 +57706,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][44] = 18, [2][0][RTW89_UK][1][44] = 56, [2][0][RTW89_UK][0][44] = 18, + [2][0][RTW89_THAILAND][1][44] = 52, + [2][0][RTW89_THAILAND][0][44] = 8, [2][0][RTW89_FCC][1][45] = 8, [2][0][RTW89_FCC][2][45] = 127, [2][0][RTW89_ETSI][1][45] = 127, @@ -54734,6 +57715,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][45] = 127, [2][0][RTW89_MKK][0][45] = 127, [2][0][RTW89_IC][1][45] = 8, + [2][0][RTW89_IC][2][45] = 70, [2][0][RTW89_KCC][1][45] = -2, [2][0][RTW89_KCC][0][45] = 127, [2][0][RTW89_ACMA][1][45] = 127, @@ -54743,6 +57725,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][45] = 127, [2][0][RTW89_UK][1][45] = 127, [2][0][RTW89_UK][0][45] = 127, + [2][0][RTW89_THAILAND][1][45] = 127, + [2][0][RTW89_THAILAND][0][45] = 127, [2][0][RTW89_FCC][1][47] = 8, [2][0][RTW89_FCC][2][47] = 127, [2][0][RTW89_ETSI][1][47] = 127, @@ -54750,6 +57734,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][47] = 127, [2][0][RTW89_MKK][0][47] = 127, [2][0][RTW89_IC][1][47] = 8, + [2][0][RTW89_IC][2][47] = 70, [2][0][RTW89_KCC][1][47] = -2, [2][0][RTW89_KCC][0][47] = 127, [2][0][RTW89_ACMA][1][47] = 127, @@ -54759,6 +57744,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][47] = 127, [2][0][RTW89_UK][1][47] = 127, [2][0][RTW89_UK][0][47] = 127, + [2][0][RTW89_THAILAND][1][47] = 127, + [2][0][RTW89_THAILAND][0][47] = 127, [2][0][RTW89_FCC][1][49] = 8, [2][0][RTW89_FCC][2][49] = 127, [2][0][RTW89_ETSI][1][49] = 127, @@ -54766,6 +57753,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][49] = 127, [2][0][RTW89_MKK][0][49] = 127, [2][0][RTW89_IC][1][49] = 8, + [2][0][RTW89_IC][2][49] = 70, [2][0][RTW89_KCC][1][49] = -2, [2][0][RTW89_KCC][0][49] = 127, [2][0][RTW89_ACMA][1][49] = 127, @@ -54775,6 +57763,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][49] = 127, [2][0][RTW89_UK][1][49] = 127, [2][0][RTW89_UK][0][49] = 127, + [2][0][RTW89_THAILAND][1][49] = 127, + [2][0][RTW89_THAILAND][0][49] = 127, [2][0][RTW89_FCC][1][51] = 8, [2][0][RTW89_FCC][2][51] = 127, [2][0][RTW89_ETSI][1][51] = 127, @@ -54782,6 +57772,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][51] = 127, [2][0][RTW89_MKK][0][51] = 127, [2][0][RTW89_IC][1][51] = 8, + [2][0][RTW89_IC][2][51] = 70, [2][0][RTW89_KCC][1][51] = -2, [2][0][RTW89_KCC][0][51] = 127, [2][0][RTW89_ACMA][1][51] = 127, @@ -54791,6 +57782,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][51] = 127, [2][0][RTW89_UK][1][51] = 127, [2][0][RTW89_UK][0][51] = 127, + [2][0][RTW89_THAILAND][1][51] = 127, + [2][0][RTW89_THAILAND][0][51] = 127, [2][0][RTW89_FCC][1][53] = 8, [2][0][RTW89_FCC][2][53] = 127, [2][0][RTW89_ETSI][1][53] = 127, @@ -54798,6 +57791,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][53] = 127, [2][0][RTW89_MKK][0][53] = 127, [2][0][RTW89_IC][1][53] = 8, + [2][0][RTW89_IC][2][53] = 70, [2][0][RTW89_KCC][1][53] = -2, [2][0][RTW89_KCC][0][53] = 127, [2][0][RTW89_ACMA][1][53] = 127, @@ -54807,13 +57801,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][53] = 127, [2][0][RTW89_UK][1][53] = 127, [2][0][RTW89_UK][0][53] = 127, + [2][0][RTW89_THAILAND][1][53] = 127, + [2][0][RTW89_THAILAND][0][53] = 127, [2][0][RTW89_FCC][1][55] = 8, - [2][0][RTW89_FCC][2][55] = 78, + [2][0][RTW89_FCC][2][55] = 68, [2][0][RTW89_ETSI][1][55] = 127, [2][0][RTW89_ETSI][0][55] = 127, [2][0][RTW89_MKK][1][55] = 127, [2][0][RTW89_MKK][0][55] = 127, [2][0][RTW89_IC][1][55] = 8, + [2][0][RTW89_IC][2][55] = 68, [2][0][RTW89_KCC][1][55] = -2, [2][0][RTW89_KCC][0][55] = 127, [2][0][RTW89_ACMA][1][55] = 127, @@ -54823,13 +57820,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][55] = 127, [2][0][RTW89_UK][1][55] = 127, [2][0][RTW89_UK][0][55] = 127, + [2][0][RTW89_THAILAND][1][55] = 127, + [2][0][RTW89_THAILAND][0][55] = 127, [2][0][RTW89_FCC][1][57] = 8, - [2][0][RTW89_FCC][2][57] = 78, + [2][0][RTW89_FCC][2][57] = 68, [2][0][RTW89_ETSI][1][57] = 127, [2][0][RTW89_ETSI][0][57] = 127, [2][0][RTW89_MKK][1][57] = 127, [2][0][RTW89_MKK][0][57] = 127, [2][0][RTW89_IC][1][57] = 8, + [2][0][RTW89_IC][2][57] = 68, [2][0][RTW89_KCC][1][57] = -2, [2][0][RTW89_KCC][0][57] = 127, [2][0][RTW89_ACMA][1][57] = 127, @@ -54839,13 +57839,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][57] = 127, [2][0][RTW89_UK][1][57] = 127, [2][0][RTW89_UK][0][57] = 127, + [2][0][RTW89_THAILAND][1][57] = 127, + [2][0][RTW89_THAILAND][0][57] = 127, 
[2][0][RTW89_FCC][1][59] = 8, - [2][0][RTW89_FCC][2][59] = 78, + [2][0][RTW89_FCC][2][59] = 68, [2][0][RTW89_ETSI][1][59] = 127, [2][0][RTW89_ETSI][0][59] = 127, [2][0][RTW89_MKK][1][59] = 127, [2][0][RTW89_MKK][0][59] = 127, [2][0][RTW89_IC][1][59] = 8, + [2][0][RTW89_IC][2][59] = 68, [2][0][RTW89_KCC][1][59] = -2, [2][0][RTW89_KCC][0][59] = 127, [2][0][RTW89_ACMA][1][59] = 127, @@ -54855,13 +57858,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][59] = 127, [2][0][RTW89_UK][1][59] = 127, [2][0][RTW89_UK][0][59] = 127, + [2][0][RTW89_THAILAND][1][59] = 127, + [2][0][RTW89_THAILAND][0][59] = 127, [2][0][RTW89_FCC][1][60] = 8, - [2][0][RTW89_FCC][2][60] = 78, + [2][0][RTW89_FCC][2][60] = 68, [2][0][RTW89_ETSI][1][60] = 127, [2][0][RTW89_ETSI][0][60] = 127, [2][0][RTW89_MKK][1][60] = 127, [2][0][RTW89_MKK][0][60] = 127, [2][0][RTW89_IC][1][60] = 8, + [2][0][RTW89_IC][2][60] = 68, [2][0][RTW89_KCC][1][60] = -2, [2][0][RTW89_KCC][0][60] = 127, [2][0][RTW89_ACMA][1][60] = 127, @@ -54871,13 +57877,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][60] = 127, [2][0][RTW89_UK][1][60] = 127, [2][0][RTW89_UK][0][60] = 127, + [2][0][RTW89_THAILAND][1][60] = 127, + [2][0][RTW89_THAILAND][0][60] = 127, [2][0][RTW89_FCC][1][62] = 8, - [2][0][RTW89_FCC][2][62] = 78, + [2][0][RTW89_FCC][2][62] = 68, [2][0][RTW89_ETSI][1][62] = 127, [2][0][RTW89_ETSI][0][62] = 127, [2][0][RTW89_MKK][1][62] = 127, [2][0][RTW89_MKK][0][62] = 127, [2][0][RTW89_IC][1][62] = 8, + [2][0][RTW89_IC][2][62] = 68, [2][0][RTW89_KCC][1][62] = -2, [2][0][RTW89_KCC][0][62] = 127, [2][0][RTW89_ACMA][1][62] = 127, @@ -54887,13 +57896,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][62] = 127, [2][0][RTW89_UK][1][62] = 127, [2][0][RTW89_UK][0][62] = 127, + [2][0][RTW89_THAILAND][1][62] = 127, + [2][0][RTW89_THAILAND][0][62] = 127, [2][0][RTW89_FCC][1][64] = 8, - [2][0][RTW89_FCC][2][64] = 78, + [2][0][RTW89_FCC][2][64] = 68, [2][0][RTW89_ETSI][1][64] = 127, [2][0][RTW89_ETSI][0][64] = 127, [2][0][RTW89_MKK][1][64] = 127, [2][0][RTW89_MKK][0][64] = 127, [2][0][RTW89_IC][1][64] = 8, + [2][0][RTW89_IC][2][64] = 68, [2][0][RTW89_KCC][1][64] = -2, [2][0][RTW89_KCC][0][64] = 127, [2][0][RTW89_ACMA][1][64] = 127, @@ -54903,13 +57915,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][64] = 127, [2][0][RTW89_UK][1][64] = 127, [2][0][RTW89_UK][0][64] = 127, + [2][0][RTW89_THAILAND][1][64] = 127, + [2][0][RTW89_THAILAND][0][64] = 127, [2][0][RTW89_FCC][1][66] = 8, - [2][0][RTW89_FCC][2][66] = 78, + [2][0][RTW89_FCC][2][66] = 68, [2][0][RTW89_ETSI][1][66] = 127, [2][0][RTW89_ETSI][0][66] = 127, [2][0][RTW89_MKK][1][66] = 127, [2][0][RTW89_MKK][0][66] = 127, [2][0][RTW89_IC][1][66] = 8, + [2][0][RTW89_IC][2][66] = 68, [2][0][RTW89_KCC][1][66] = -2, [2][0][RTW89_KCC][0][66] = 127, [2][0][RTW89_ACMA][1][66] = 127, @@ -54919,13 +57934,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][66] = 127, [2][0][RTW89_UK][1][66] = 127, [2][0][RTW89_UK][0][66] = 127, + [2][0][RTW89_THAILAND][1][66] = 127, + [2][0][RTW89_THAILAND][0][66] = 127, [2][0][RTW89_FCC][1][68] = 8, - [2][0][RTW89_FCC][2][68] = 78, + [2][0][RTW89_FCC][2][68] = 68, [2][0][RTW89_ETSI][1][68] = 127, [2][0][RTW89_ETSI][0][68] = 127, [2][0][RTW89_MKK][1][68] = 127, [2][0][RTW89_MKK][0][68] = 127, [2][0][RTW89_IC][1][68] = 8, + [2][0][RTW89_IC][2][68] = 68, 
[2][0][RTW89_KCC][1][68] = -2, [2][0][RTW89_KCC][0][68] = 127, [2][0][RTW89_ACMA][1][68] = 127, @@ -54935,13 +57953,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][68] = 127, [2][0][RTW89_UK][1][68] = 127, [2][0][RTW89_UK][0][68] = 127, + [2][0][RTW89_THAILAND][1][68] = 127, + [2][0][RTW89_THAILAND][0][68] = 127, [2][0][RTW89_FCC][1][70] = 8, - [2][0][RTW89_FCC][2][70] = 78, + [2][0][RTW89_FCC][2][70] = 68, [2][0][RTW89_ETSI][1][70] = 127, [2][0][RTW89_ETSI][0][70] = 127, [2][0][RTW89_MKK][1][70] = 127, [2][0][RTW89_MKK][0][70] = 127, [2][0][RTW89_IC][1][70] = 8, + [2][0][RTW89_IC][2][70] = 68, [2][0][RTW89_KCC][1][70] = -2, [2][0][RTW89_KCC][0][70] = 127, [2][0][RTW89_ACMA][1][70] = 127, @@ -54951,13 +57972,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][70] = 127, [2][0][RTW89_UK][1][70] = 127, [2][0][RTW89_UK][0][70] = 127, + [2][0][RTW89_THAILAND][1][70] = 127, + [2][0][RTW89_THAILAND][0][70] = 127, [2][0][RTW89_FCC][1][72] = 8, - [2][0][RTW89_FCC][2][72] = 78, + [2][0][RTW89_FCC][2][72] = 68, [2][0][RTW89_ETSI][1][72] = 127, [2][0][RTW89_ETSI][0][72] = 127, [2][0][RTW89_MKK][1][72] = 127, [2][0][RTW89_MKK][0][72] = 127, [2][0][RTW89_IC][1][72] = 8, + [2][0][RTW89_IC][2][72] = 68, [2][0][RTW89_KCC][1][72] = -2, [2][0][RTW89_KCC][0][72] = 127, [2][0][RTW89_ACMA][1][72] = 127, @@ -54967,13 +57991,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][72] = 127, [2][0][RTW89_UK][1][72] = 127, [2][0][RTW89_UK][0][72] = 127, + [2][0][RTW89_THAILAND][1][72] = 127, + [2][0][RTW89_THAILAND][0][72] = 127, [2][0][RTW89_FCC][1][74] = 8, - [2][0][RTW89_FCC][2][74] = 78, + [2][0][RTW89_FCC][2][74] = 68, [2][0][RTW89_ETSI][1][74] = 127, [2][0][RTW89_ETSI][0][74] = 127, [2][0][RTW89_MKK][1][74] = 127, [2][0][RTW89_MKK][0][74] = 127, [2][0][RTW89_IC][1][74] = 8, + [2][0][RTW89_IC][2][74] = 68, [2][0][RTW89_KCC][1][74] = -2, [2][0][RTW89_KCC][0][74] = 127, [2][0][RTW89_ACMA][1][74] = 127, @@ -54983,13 +58010,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][74] = 127, [2][0][RTW89_UK][1][74] = 127, [2][0][RTW89_UK][0][74] = 127, + [2][0][RTW89_THAILAND][1][74] = 127, + [2][0][RTW89_THAILAND][0][74] = 127, [2][0][RTW89_FCC][1][75] = 8, - [2][0][RTW89_FCC][2][75] = 78, + [2][0][RTW89_FCC][2][75] = 68, [2][0][RTW89_ETSI][1][75] = 127, [2][0][RTW89_ETSI][0][75] = 127, [2][0][RTW89_MKK][1][75] = 127, [2][0][RTW89_MKK][0][75] = 127, [2][0][RTW89_IC][1][75] = 8, + [2][0][RTW89_IC][2][75] = 68, [2][0][RTW89_KCC][1][75] = -2, [2][0][RTW89_KCC][0][75] = 127, [2][0][RTW89_ACMA][1][75] = 127, @@ -54999,13 +58029,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][75] = 127, [2][0][RTW89_UK][1][75] = 127, [2][0][RTW89_UK][0][75] = 127, + [2][0][RTW89_THAILAND][1][75] = 127, + [2][0][RTW89_THAILAND][0][75] = 127, [2][0][RTW89_FCC][1][77] = 8, - [2][0][RTW89_FCC][2][77] = 78, + [2][0][RTW89_FCC][2][77] = 68, [2][0][RTW89_ETSI][1][77] = 127, [2][0][RTW89_ETSI][0][77] = 127, [2][0][RTW89_MKK][1][77] = 127, [2][0][RTW89_MKK][0][77] = 127, [2][0][RTW89_IC][1][77] = 8, + [2][0][RTW89_IC][2][77] = 68, [2][0][RTW89_KCC][1][77] = -2, [2][0][RTW89_KCC][0][77] = 127, [2][0][RTW89_ACMA][1][77] = 127, @@ -55015,13 +58048,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][77] = 127, [2][0][RTW89_UK][1][77] = 127, [2][0][RTW89_UK][0][77] = 127, + 
[2][0][RTW89_THAILAND][1][77] = 127, + [2][0][RTW89_THAILAND][0][77] = 127, [2][0][RTW89_FCC][1][79] = 8, - [2][0][RTW89_FCC][2][79] = 78, + [2][0][RTW89_FCC][2][79] = 68, [2][0][RTW89_ETSI][1][79] = 127, [2][0][RTW89_ETSI][0][79] = 127, [2][0][RTW89_MKK][1][79] = 127, [2][0][RTW89_MKK][0][79] = 127, [2][0][RTW89_IC][1][79] = 8, + [2][0][RTW89_IC][2][79] = 68, [2][0][RTW89_KCC][1][79] = -2, [2][0][RTW89_KCC][0][79] = 127, [2][0][RTW89_ACMA][1][79] = 127, @@ -55031,13 +58067,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][79] = 127, [2][0][RTW89_UK][1][79] = 127, [2][0][RTW89_UK][0][79] = 127, + [2][0][RTW89_THAILAND][1][79] = 127, + [2][0][RTW89_THAILAND][0][79] = 127, [2][0][RTW89_FCC][1][81] = 8, - [2][0][RTW89_FCC][2][81] = 78, + [2][0][RTW89_FCC][2][81] = 68, [2][0][RTW89_ETSI][1][81] = 127, [2][0][RTW89_ETSI][0][81] = 127, [2][0][RTW89_MKK][1][81] = 127, [2][0][RTW89_MKK][0][81] = 127, [2][0][RTW89_IC][1][81] = 8, + [2][0][RTW89_IC][2][81] = 68, [2][0][RTW89_KCC][1][81] = -2, [2][0][RTW89_KCC][0][81] = 127, [2][0][RTW89_ACMA][1][81] = 127, @@ -55047,13 +58086,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][81] = 127, [2][0][RTW89_UK][1][81] = 127, [2][0][RTW89_UK][0][81] = 127, + [2][0][RTW89_THAILAND][1][81] = 127, + [2][0][RTW89_THAILAND][0][81] = 127, [2][0][RTW89_FCC][1][83] = 8, - [2][0][RTW89_FCC][2][83] = 78, + [2][0][RTW89_FCC][2][83] = 68, [2][0][RTW89_ETSI][1][83] = 127, [2][0][RTW89_ETSI][0][83] = 127, [2][0][RTW89_MKK][1][83] = 127, [2][0][RTW89_MKK][0][83] = 127, [2][0][RTW89_IC][1][83] = 8, + [2][0][RTW89_IC][2][83] = 68, [2][0][RTW89_KCC][1][83] = -2, [2][0][RTW89_KCC][0][83] = 127, [2][0][RTW89_ACMA][1][83] = 127, @@ -55063,13 +58105,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][83] = 127, [2][0][RTW89_UK][1][83] = 127, [2][0][RTW89_UK][0][83] = 127, + [2][0][RTW89_THAILAND][1][83] = 127, + [2][0][RTW89_THAILAND][0][83] = 127, [2][0][RTW89_FCC][1][85] = 8, - [2][0][RTW89_FCC][2][85] = 78, + [2][0][RTW89_FCC][2][85] = 68, [2][0][RTW89_ETSI][1][85] = 127, [2][0][RTW89_ETSI][0][85] = 127, [2][0][RTW89_MKK][1][85] = 127, [2][0][RTW89_MKK][0][85] = 127, [2][0][RTW89_IC][1][85] = 8, + [2][0][RTW89_IC][2][85] = 68, [2][0][RTW89_KCC][1][85] = -2, [2][0][RTW89_KCC][0][85] = 127, [2][0][RTW89_ACMA][1][85] = 127, @@ -55079,6 +58124,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][85] = 127, [2][0][RTW89_UK][1][85] = 127, [2][0][RTW89_UK][0][85] = 127, + [2][0][RTW89_THAILAND][1][85] = 127, + [2][0][RTW89_THAILAND][0][85] = 127, [2][0][RTW89_FCC][1][87] = 8, [2][0][RTW89_FCC][2][87] = 127, [2][0][RTW89_ETSI][1][87] = 127, @@ -55086,6 +58133,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][87] = 127, [2][0][RTW89_MKK][0][87] = 127, [2][0][RTW89_IC][1][87] = 8, + [2][0][RTW89_IC][2][87] = 127, [2][0][RTW89_KCC][1][87] = -2, [2][0][RTW89_KCC][0][87] = 127, [2][0][RTW89_ACMA][1][87] = 127, @@ -55095,6 +58143,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][87] = 127, [2][0][RTW89_UK][1][87] = 127, [2][0][RTW89_UK][0][87] = 127, + [2][0][RTW89_THAILAND][1][87] = 127, + [2][0][RTW89_THAILAND][0][87] = 127, [2][0][RTW89_FCC][1][89] = 8, [2][0][RTW89_FCC][2][89] = 127, [2][0][RTW89_ETSI][1][89] = 127, @@ -55102,6 +58152,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][89] = 
127, [2][0][RTW89_MKK][0][89] = 127, [2][0][RTW89_IC][1][89] = 8, + [2][0][RTW89_IC][2][89] = 127, [2][0][RTW89_KCC][1][89] = -2, [2][0][RTW89_KCC][0][89] = 127, [2][0][RTW89_ACMA][1][89] = 127, @@ -55111,6 +58162,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][89] = 127, [2][0][RTW89_UK][1][89] = 127, [2][0][RTW89_UK][0][89] = 127, + [2][0][RTW89_THAILAND][1][89] = 127, + [2][0][RTW89_THAILAND][0][89] = 127, [2][0][RTW89_FCC][1][90] = 8, [2][0][RTW89_FCC][2][90] = 127, [2][0][RTW89_ETSI][1][90] = 127, @@ -55118,6 +58171,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][90] = 127, [2][0][RTW89_MKK][0][90] = 127, [2][0][RTW89_IC][1][90] = 8, + [2][0][RTW89_IC][2][90] = 127, [2][0][RTW89_KCC][1][90] = -2, [2][0][RTW89_KCC][0][90] = 127, [2][0][RTW89_ACMA][1][90] = 127, @@ -55127,6 +58181,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][90] = 127, [2][0][RTW89_UK][1][90] = 127, [2][0][RTW89_UK][0][90] = 127, + [2][0][RTW89_THAILAND][1][90] = 127, + [2][0][RTW89_THAILAND][0][90] = 127, [2][0][RTW89_FCC][1][92] = 8, [2][0][RTW89_FCC][2][92] = 127, [2][0][RTW89_ETSI][1][92] = 127, @@ -55134,6 +58190,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][92] = 127, [2][0][RTW89_MKK][0][92] = 127, [2][0][RTW89_IC][1][92] = 8, + [2][0][RTW89_IC][2][92] = 127, [2][0][RTW89_KCC][1][92] = -2, [2][0][RTW89_KCC][0][92] = 127, [2][0][RTW89_ACMA][1][92] = 127, @@ -55143,6 +58200,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][92] = 127, [2][0][RTW89_UK][1][92] = 127, [2][0][RTW89_UK][0][92] = 127, + [2][0][RTW89_THAILAND][1][92] = 127, + [2][0][RTW89_THAILAND][0][92] = 127, [2][0][RTW89_FCC][1][94] = 8, [2][0][RTW89_FCC][2][94] = 127, [2][0][RTW89_ETSI][1][94] = 127, @@ -55150,6 +58209,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][94] = 127, [2][0][RTW89_MKK][0][94] = 127, [2][0][RTW89_IC][1][94] = 8, + [2][0][RTW89_IC][2][94] = 127, [2][0][RTW89_KCC][1][94] = -2, [2][0][RTW89_KCC][0][94] = 127, [2][0][RTW89_ACMA][1][94] = 127, @@ -55159,6 +58219,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][94] = 127, [2][0][RTW89_UK][1][94] = 127, [2][0][RTW89_UK][0][94] = 127, + [2][0][RTW89_THAILAND][1][94] = 127, + [2][0][RTW89_THAILAND][0][94] = 127, [2][0][RTW89_FCC][1][96] = 8, [2][0][RTW89_FCC][2][96] = 127, [2][0][RTW89_ETSI][1][96] = 127, @@ -55166,6 +58228,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][96] = 127, [2][0][RTW89_MKK][0][96] = 127, [2][0][RTW89_IC][1][96] = 8, + [2][0][RTW89_IC][2][96] = 127, [2][0][RTW89_KCC][1][96] = -2, [2][0][RTW89_KCC][0][96] = 127, [2][0][RTW89_ACMA][1][96] = 127, @@ -55175,6 +58238,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][96] = 127, [2][0][RTW89_UK][1][96] = 127, [2][0][RTW89_UK][0][96] = 127, + [2][0][RTW89_THAILAND][1][96] = 127, + [2][0][RTW89_THAILAND][0][96] = 127, [2][0][RTW89_FCC][1][98] = 8, [2][0][RTW89_FCC][2][98] = 127, [2][0][RTW89_ETSI][1][98] = 127, @@ -55182,6 +58247,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][98] = 127, [2][0][RTW89_MKK][0][98] = 127, [2][0][RTW89_IC][1][98] = 8, + [2][0][RTW89_IC][2][98] = 127, [2][0][RTW89_KCC][1][98] = -2, [2][0][RTW89_KCC][0][98] = 127, [2][0][RTW89_ACMA][1][98] = 127, @@ 
-55191,6 +58257,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][98] = 127, [2][0][RTW89_UK][1][98] = 127, [2][0][RTW89_UK][0][98] = 127, + [2][0][RTW89_THAILAND][1][98] = 127, + [2][0][RTW89_THAILAND][0][98] = 127, [2][0][RTW89_FCC][1][100] = 8, [2][0][RTW89_FCC][2][100] = 127, [2][0][RTW89_ETSI][1][100] = 127, @@ -55198,6 +58266,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][100] = 127, [2][0][RTW89_MKK][0][100] = 127, [2][0][RTW89_IC][1][100] = 8, + [2][0][RTW89_IC][2][100] = 127, [2][0][RTW89_KCC][1][100] = -2, [2][0][RTW89_KCC][0][100] = 127, [2][0][RTW89_ACMA][1][100] = 127, @@ -55207,6 +58276,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][100] = 127, [2][0][RTW89_UK][1][100] = 127, [2][0][RTW89_UK][0][100] = 127, + [2][0][RTW89_THAILAND][1][100] = 127, + [2][0][RTW89_THAILAND][0][100] = 127, [2][0][RTW89_FCC][1][102] = 8, [2][0][RTW89_FCC][2][102] = 127, [2][0][RTW89_ETSI][1][102] = 127, @@ -55214,6 +58285,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][102] = 127, [2][0][RTW89_MKK][0][102] = 127, [2][0][RTW89_IC][1][102] = 8, + [2][0][RTW89_IC][2][102] = 127, [2][0][RTW89_KCC][1][102] = -2, [2][0][RTW89_KCC][0][102] = 127, [2][0][RTW89_ACMA][1][102] = 127, @@ -55223,6 +58295,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][102] = 127, [2][0][RTW89_UK][1][102] = 127, [2][0][RTW89_UK][0][102] = 127, + [2][0][RTW89_THAILAND][1][102] = 127, + [2][0][RTW89_THAILAND][0][102] = 127, [2][0][RTW89_FCC][1][104] = 8, [2][0][RTW89_FCC][2][104] = 127, [2][0][RTW89_ETSI][1][104] = 127, @@ -55230,6 +58304,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][104] = 127, [2][0][RTW89_MKK][0][104] = 127, [2][0][RTW89_IC][1][104] = 8, + [2][0][RTW89_IC][2][104] = 127, [2][0][RTW89_KCC][1][104] = -2, [2][0][RTW89_KCC][0][104] = 127, [2][0][RTW89_ACMA][1][104] = 127, @@ -55239,6 +58314,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][104] = 127, [2][0][RTW89_UK][1][104] = 127, [2][0][RTW89_UK][0][104] = 127, + [2][0][RTW89_THAILAND][1][104] = 127, + [2][0][RTW89_THAILAND][0][104] = 127, [2][0][RTW89_FCC][1][105] = 8, [2][0][RTW89_FCC][2][105] = 127, [2][0][RTW89_ETSI][1][105] = 127, @@ -55246,6 +58323,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][105] = 127, [2][0][RTW89_MKK][0][105] = 127, [2][0][RTW89_IC][1][105] = 8, + [2][0][RTW89_IC][2][105] = 127, [2][0][RTW89_KCC][1][105] = -2, [2][0][RTW89_KCC][0][105] = 127, [2][0][RTW89_ACMA][1][105] = 127, @@ -55255,6 +58333,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][105] = 127, [2][0][RTW89_UK][1][105] = 127, [2][0][RTW89_UK][0][105] = 127, + [2][0][RTW89_THAILAND][1][105] = 127, + [2][0][RTW89_THAILAND][0][105] = 127, [2][0][RTW89_FCC][1][107] = 10, [2][0][RTW89_FCC][2][107] = 127, [2][0][RTW89_ETSI][1][107] = 127, @@ -55262,6 +58342,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][107] = 127, [2][0][RTW89_MKK][0][107] = 127, [2][0][RTW89_IC][1][107] = 10, + [2][0][RTW89_IC][2][107] = 127, [2][0][RTW89_KCC][1][107] = -2, [2][0][RTW89_KCC][0][107] = 127, [2][0][RTW89_ACMA][1][107] = 127, @@ -55271,6 +58352,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][107] = 127, 
[2][0][RTW89_UK][1][107] = 127, [2][0][RTW89_UK][0][107] = 127, + [2][0][RTW89_THAILAND][1][107] = 127, + [2][0][RTW89_THAILAND][0][107] = 127, [2][0][RTW89_FCC][1][109] = 12, [2][0][RTW89_FCC][2][109] = 127, [2][0][RTW89_ETSI][1][109] = 127, @@ -55278,6 +58361,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][109] = 127, [2][0][RTW89_MKK][0][109] = 127, [2][0][RTW89_IC][1][109] = 12, + [2][0][RTW89_IC][2][109] = 127, [2][0][RTW89_KCC][1][109] = 127, [2][0][RTW89_KCC][0][109] = 127, [2][0][RTW89_ACMA][1][109] = 127, @@ -55287,6 +58371,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][109] = 127, [2][0][RTW89_UK][1][109] = 127, [2][0][RTW89_UK][0][109] = 127, + [2][0][RTW89_THAILAND][1][109] = 127, + [2][0][RTW89_THAILAND][0][109] = 127, [2][0][RTW89_FCC][1][111] = 127, [2][0][RTW89_FCC][2][111] = 127, [2][0][RTW89_ETSI][1][111] = 127, @@ -55294,6 +58380,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][111] = 127, [2][0][RTW89_MKK][0][111] = 127, [2][0][RTW89_IC][1][111] = 127, + [2][0][RTW89_IC][2][111] = 127, [2][0][RTW89_KCC][1][111] = 127, [2][0][RTW89_KCC][0][111] = 127, [2][0][RTW89_ACMA][1][111] = 127, @@ -55303,6 +58390,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][111] = 127, [2][0][RTW89_UK][1][111] = 127, [2][0][RTW89_UK][0][111] = 127, + [2][0][RTW89_THAILAND][1][111] = 127, + [2][0][RTW89_THAILAND][0][111] = 127, [2][0][RTW89_FCC][1][113] = 127, [2][0][RTW89_FCC][2][113] = 127, [2][0][RTW89_ETSI][1][113] = 127, @@ -55310,6 +58399,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][113] = 127, [2][0][RTW89_MKK][0][113] = 127, [2][0][RTW89_IC][1][113] = 127, + [2][0][RTW89_IC][2][113] = 127, [2][0][RTW89_KCC][1][113] = 127, [2][0][RTW89_KCC][0][113] = 127, [2][0][RTW89_ACMA][1][113] = 127, @@ -55319,6 +58409,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][113] = 127, [2][0][RTW89_UK][1][113] = 127, [2][0][RTW89_UK][0][113] = 127, + [2][0][RTW89_THAILAND][1][113] = 127, + [2][0][RTW89_THAILAND][0][113] = 127, [2][0][RTW89_FCC][1][115] = 127, [2][0][RTW89_FCC][2][115] = 127, [2][0][RTW89_ETSI][1][115] = 127, @@ -55326,6 +58418,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][115] = 127, [2][0][RTW89_MKK][0][115] = 127, [2][0][RTW89_IC][1][115] = 127, + [2][0][RTW89_IC][2][115] = 127, [2][0][RTW89_KCC][1][115] = 127, [2][0][RTW89_KCC][0][115] = 127, [2][0][RTW89_ACMA][1][115] = 127, @@ -55335,6 +58428,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][115] = 127, [2][0][RTW89_UK][1][115] = 127, [2][0][RTW89_UK][0][115] = 127, + [2][0][RTW89_THAILAND][1][115] = 127, + [2][0][RTW89_THAILAND][0][115] = 127, [2][0][RTW89_FCC][1][117] = 127, [2][0][RTW89_FCC][2][117] = 127, [2][0][RTW89_ETSI][1][117] = 127, @@ -55342,6 +58437,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][117] = 127, [2][0][RTW89_MKK][0][117] = 127, [2][0][RTW89_IC][1][117] = 127, + [2][0][RTW89_IC][2][117] = 127, [2][0][RTW89_KCC][1][117] = 127, [2][0][RTW89_KCC][0][117] = 127, [2][0][RTW89_ACMA][1][117] = 127, @@ -55351,6 +58447,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][117] = 127, [2][0][RTW89_UK][1][117] = 127, [2][0][RTW89_UK][0][117] = 127, + [2][0][RTW89_THAILAND][1][117] = 
127, + [2][0][RTW89_THAILAND][0][117] = 127, [2][0][RTW89_FCC][1][119] = 127, [2][0][RTW89_FCC][2][119] = 127, [2][0][RTW89_ETSI][1][119] = 127, @@ -55358,6 +58456,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][119] = 127, [2][0][RTW89_MKK][0][119] = 127, [2][0][RTW89_IC][1][119] = 127, + [2][0][RTW89_IC][2][119] = 127, [2][0][RTW89_KCC][1][119] = 127, [2][0][RTW89_KCC][0][119] = 127, [2][0][RTW89_ACMA][1][119] = 127, @@ -55367,6 +58466,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][119] = 127, [2][0][RTW89_UK][1][119] = 127, [2][0][RTW89_UK][0][119] = 127, + [2][0][RTW89_THAILAND][1][119] = 127, + [2][0][RTW89_THAILAND][0][119] = 127, [2][1][RTW89_FCC][1][0] = -16, [2][1][RTW89_FCC][2][0] = 54, [2][1][RTW89_ETSI][1][0] = 44, @@ -55374,6 +58475,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][0] = 42, [2][1][RTW89_MKK][0][0] = 2, [2][1][RTW89_IC][1][0] = -16, + [2][1][RTW89_IC][2][0] = 54, [2][1][RTW89_KCC][1][0] = -14, [2][1][RTW89_KCC][0][0] = -14, [2][1][RTW89_ACMA][1][0] = 44, @@ -55383,6 +58485,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][0] = 6, [2][1][RTW89_UK][1][0] = 44, [2][1][RTW89_UK][0][0] = 6, + [2][1][RTW89_THAILAND][1][0] = 28, + [2][1][RTW89_THAILAND][0][0] = -16, [2][1][RTW89_FCC][1][2] = -16, [2][1][RTW89_FCC][2][2] = 54, [2][1][RTW89_ETSI][1][2] = 44, @@ -55390,6 +58494,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][2] = 40, [2][1][RTW89_MKK][0][2] = 2, [2][1][RTW89_IC][1][2] = -16, + [2][1][RTW89_IC][2][2] = 54, [2][1][RTW89_KCC][1][2] = -14, [2][1][RTW89_KCC][0][2] = -14, [2][1][RTW89_ACMA][1][2] = 44, @@ -55399,6 +58504,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][2] = 6, [2][1][RTW89_UK][1][2] = 44, [2][1][RTW89_UK][0][2] = 6, + [2][1][RTW89_THAILAND][1][2] = 28, + [2][1][RTW89_THAILAND][0][2] = -16, [2][1][RTW89_FCC][1][4] = -16, [2][1][RTW89_FCC][2][4] = 54, [2][1][RTW89_ETSI][1][4] = 44, @@ -55406,6 +58513,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][4] = 40, [2][1][RTW89_MKK][0][4] = 2, [2][1][RTW89_IC][1][4] = -16, + [2][1][RTW89_IC][2][4] = 54, [2][1][RTW89_KCC][1][4] = -14, [2][1][RTW89_KCC][0][4] = -14, [2][1][RTW89_ACMA][1][4] = 44, @@ -55415,6 +58523,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][4] = 6, [2][1][RTW89_UK][1][4] = 44, [2][1][RTW89_UK][0][4] = 6, + [2][1][RTW89_THAILAND][1][4] = 28, + [2][1][RTW89_THAILAND][0][4] = -16, [2][1][RTW89_FCC][1][6] = -16, [2][1][RTW89_FCC][2][6] = 54, [2][1][RTW89_ETSI][1][6] = 44, @@ -55422,6 +58532,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][6] = 40, [2][1][RTW89_MKK][0][6] = 2, [2][1][RTW89_IC][1][6] = -16, + [2][1][RTW89_IC][2][6] = 54, [2][1][RTW89_KCC][1][6] = -14, [2][1][RTW89_KCC][0][6] = -14, [2][1][RTW89_ACMA][1][6] = 44, @@ -55431,6 +58542,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][6] = 6, [2][1][RTW89_UK][1][6] = 44, [2][1][RTW89_UK][0][6] = 6, + [2][1][RTW89_THAILAND][1][6] = 28, + [2][1][RTW89_THAILAND][0][6] = -16, [2][1][RTW89_FCC][1][8] = -16, [2][1][RTW89_FCC][2][8] = 54, [2][1][RTW89_ETSI][1][8] = 44, @@ -55438,6 +58551,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][8] = 40, 
[2][1][RTW89_MKK][0][8] = 2, [2][1][RTW89_IC][1][8] = -16, + [2][1][RTW89_IC][2][8] = 54, [2][1][RTW89_KCC][1][8] = -14, [2][1][RTW89_KCC][0][8] = -14, [2][1][RTW89_ACMA][1][8] = 44, @@ -55447,6 +58561,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][8] = 6, [2][1][RTW89_UK][1][8] = 44, [2][1][RTW89_UK][0][8] = 6, + [2][1][RTW89_THAILAND][1][8] = 28, + [2][1][RTW89_THAILAND][0][8] = -16, [2][1][RTW89_FCC][1][10] = -16, [2][1][RTW89_FCC][2][10] = 54, [2][1][RTW89_ETSI][1][10] = 44, @@ -55454,6 +58570,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][10] = 40, [2][1][RTW89_MKK][0][10] = 2, [2][1][RTW89_IC][1][10] = -16, + [2][1][RTW89_IC][2][10] = 54, [2][1][RTW89_KCC][1][10] = -14, [2][1][RTW89_KCC][0][10] = -14, [2][1][RTW89_ACMA][1][10] = 44, @@ -55463,6 +58580,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][10] = 6, [2][1][RTW89_UK][1][10] = 44, [2][1][RTW89_UK][0][10] = 6, + [2][1][RTW89_THAILAND][1][10] = 28, + [2][1][RTW89_THAILAND][0][10] = -16, [2][1][RTW89_FCC][1][12] = -16, [2][1][RTW89_FCC][2][12] = 54, [2][1][RTW89_ETSI][1][12] = 44, @@ -55470,6 +58589,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][12] = 40, [2][1][RTW89_MKK][0][12] = 2, [2][1][RTW89_IC][1][12] = -16, + [2][1][RTW89_IC][2][12] = 54, [2][1][RTW89_KCC][1][12] = -14, [2][1][RTW89_KCC][0][12] = -14, [2][1][RTW89_ACMA][1][12] = 44, @@ -55479,6 +58599,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][12] = 6, [2][1][RTW89_UK][1][12] = 44, [2][1][RTW89_UK][0][12] = 6, + [2][1][RTW89_THAILAND][1][12] = 28, + [2][1][RTW89_THAILAND][0][12] = -16, [2][1][RTW89_FCC][1][14] = -16, [2][1][RTW89_FCC][2][14] = 54, [2][1][RTW89_ETSI][1][14] = 44, @@ -55486,6 +58608,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][14] = 40, [2][1][RTW89_MKK][0][14] = 2, [2][1][RTW89_IC][1][14] = -16, + [2][1][RTW89_IC][2][14] = 54, [2][1][RTW89_KCC][1][14] = -14, [2][1][RTW89_KCC][0][14] = -14, [2][1][RTW89_ACMA][1][14] = 44, @@ -55495,6 +58618,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][14] = 6, [2][1][RTW89_UK][1][14] = 44, [2][1][RTW89_UK][0][14] = 6, + [2][1][RTW89_THAILAND][1][14] = 28, + [2][1][RTW89_THAILAND][0][14] = -16, [2][1][RTW89_FCC][1][15] = -16, [2][1][RTW89_FCC][2][15] = 54, [2][1][RTW89_ETSI][1][15] = 44, @@ -55502,6 +58627,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][15] = 40, [2][1][RTW89_MKK][0][15] = 2, [2][1][RTW89_IC][1][15] = -16, + [2][1][RTW89_IC][2][15] = 54, [2][1][RTW89_KCC][1][15] = -14, [2][1][RTW89_KCC][0][15] = -14, [2][1][RTW89_ACMA][1][15] = 44, @@ -55511,6 +58637,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][15] = 6, [2][1][RTW89_UK][1][15] = 44, [2][1][RTW89_UK][0][15] = 6, + [2][1][RTW89_THAILAND][1][15] = 28, + [2][1][RTW89_THAILAND][0][15] = -16, [2][1][RTW89_FCC][1][17] = -16, [2][1][RTW89_FCC][2][17] = 54, [2][1][RTW89_ETSI][1][17] = 44, @@ -55518,6 +58646,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][17] = 40, [2][1][RTW89_MKK][0][17] = 2, [2][1][RTW89_IC][1][17] = -16, + [2][1][RTW89_IC][2][17] = 54, [2][1][RTW89_KCC][1][17] = -14, [2][1][RTW89_KCC][0][17] = -14, [2][1][RTW89_ACMA][1][17] = 44, @@ -55527,6 +58656,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][17] = 6, [2][1][RTW89_UK][1][17] = 44, [2][1][RTW89_UK][0][17] = 6, + [2][1][RTW89_THAILAND][1][17] = 28, + [2][1][RTW89_THAILAND][0][17] = -16, [2][1][RTW89_FCC][1][19] = -16, [2][1][RTW89_FCC][2][19] = 54, [2][1][RTW89_ETSI][1][19] = 44, @@ -55534,6 +58665,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][19] = 40, [2][1][RTW89_MKK][0][19] = 2, [2][1][RTW89_IC][1][19] = -16, + [2][1][RTW89_IC][2][19] = 54, [2][1][RTW89_KCC][1][19] = -14, [2][1][RTW89_KCC][0][19] = -14, [2][1][RTW89_ACMA][1][19] = 44, @@ -55543,6 +58675,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][19] = 6, [2][1][RTW89_UK][1][19] = 44, [2][1][RTW89_UK][0][19] = 6, + [2][1][RTW89_THAILAND][1][19] = 28, + [2][1][RTW89_THAILAND][0][19] = -16, [2][1][RTW89_FCC][1][21] = -16, [2][1][RTW89_FCC][2][21] = 54, [2][1][RTW89_ETSI][1][21] = 44, @@ -55550,6 +58684,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][21] = 40, [2][1][RTW89_MKK][0][21] = 2, [2][1][RTW89_IC][1][21] = -16, + [2][1][RTW89_IC][2][21] = 54, [2][1][RTW89_KCC][1][21] = -14, [2][1][RTW89_KCC][0][21] = -14, [2][1][RTW89_ACMA][1][21] = 44, @@ -55559,6 +58694,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][21] = 6, [2][1][RTW89_UK][1][21] = 44, [2][1][RTW89_UK][0][21] = 6, + [2][1][RTW89_THAILAND][1][21] = 28, + [2][1][RTW89_THAILAND][0][21] = -16, [2][1][RTW89_FCC][1][23] = -16, [2][1][RTW89_FCC][2][23] = 54, [2][1][RTW89_ETSI][1][23] = 44, @@ -55566,6 +58703,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][23] = 40, [2][1][RTW89_MKK][0][23] = 2, [2][1][RTW89_IC][1][23] = -16, + [2][1][RTW89_IC][2][23] = 54, [2][1][RTW89_KCC][1][23] = -14, [2][1][RTW89_KCC][0][23] = -14, [2][1][RTW89_ACMA][1][23] = 44, @@ -55575,6 +58713,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][23] = 6, [2][1][RTW89_UK][1][23] = 44, [2][1][RTW89_UK][0][23] = 6, + [2][1][RTW89_THAILAND][1][23] = 30, + [2][1][RTW89_THAILAND][0][23] = -16, [2][1][RTW89_FCC][1][25] = -16, [2][1][RTW89_FCC][2][25] = 54, [2][1][RTW89_ETSI][1][25] = 44, @@ -55582,6 +58722,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][25] = 40, [2][1][RTW89_MKK][0][25] = 2, [2][1][RTW89_IC][1][25] = -16, + [2][1][RTW89_IC][2][25] = 54, [2][1][RTW89_KCC][1][25] = -14, [2][1][RTW89_KCC][0][25] = -14, [2][1][RTW89_ACMA][1][25] = 44, @@ -55591,6 +58732,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][25] = 6, [2][1][RTW89_UK][1][25] = 44, [2][1][RTW89_UK][0][25] = 6, + [2][1][RTW89_THAILAND][1][25] = 28, + [2][1][RTW89_THAILAND][0][25] = -16, [2][1][RTW89_FCC][1][27] = -16, [2][1][RTW89_FCC][2][27] = 54, [2][1][RTW89_ETSI][1][27] = 44, @@ -55598,6 +58741,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][27] = 40, [2][1][RTW89_MKK][0][27] = 2, [2][1][RTW89_IC][1][27] = -16, + [2][1][RTW89_IC][2][27] = 54, [2][1][RTW89_KCC][1][27] = -14, [2][1][RTW89_KCC][0][27] = -14, [2][1][RTW89_ACMA][1][27] = 44, @@ -55607,6 +58751,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][27] = 6, [2][1][RTW89_UK][1][27] = 44, [2][1][RTW89_UK][0][27] = 6, + [2][1][RTW89_THAILAND][1][27] = 28, + [2][1][RTW89_THAILAND][0][27] = -16, 
[2][1][RTW89_FCC][1][29] = -16, [2][1][RTW89_FCC][2][29] = 54, [2][1][RTW89_ETSI][1][29] = 44, @@ -55614,6 +58760,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][29] = 40, [2][1][RTW89_MKK][0][29] = 2, [2][1][RTW89_IC][1][29] = -16, + [2][1][RTW89_IC][2][29] = 54, [2][1][RTW89_KCC][1][29] = -14, [2][1][RTW89_KCC][0][29] = -14, [2][1][RTW89_ACMA][1][29] = 44, @@ -55623,6 +58770,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][29] = 6, [2][1][RTW89_UK][1][29] = 44, [2][1][RTW89_UK][0][29] = 6, + [2][1][RTW89_THAILAND][1][29] = 28, + [2][1][RTW89_THAILAND][0][29] = -16, [2][1][RTW89_FCC][1][30] = -16, [2][1][RTW89_FCC][2][30] = 54, [2][1][RTW89_ETSI][1][30] = 44, @@ -55630,6 +58779,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][30] = 40, [2][1][RTW89_MKK][0][30] = 2, [2][1][RTW89_IC][1][30] = -16, + [2][1][RTW89_IC][2][30] = 54, [2][1][RTW89_KCC][1][30] = -14, [2][1][RTW89_KCC][0][30] = -14, [2][1][RTW89_ACMA][1][30] = 44, @@ -55639,6 +58789,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][30] = 6, [2][1][RTW89_UK][1][30] = 44, [2][1][RTW89_UK][0][30] = 6, + [2][1][RTW89_THAILAND][1][30] = 28, + [2][1][RTW89_THAILAND][0][30] = -16, [2][1][RTW89_FCC][1][32] = -16, [2][1][RTW89_FCC][2][32] = 54, [2][1][RTW89_ETSI][1][32] = 44, @@ -55646,6 +58798,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][32] = 40, [2][1][RTW89_MKK][0][32] = 2, [2][1][RTW89_IC][1][32] = -16, + [2][1][RTW89_IC][2][32] = 54, [2][1][RTW89_KCC][1][32] = -14, [2][1][RTW89_KCC][0][32] = -14, [2][1][RTW89_ACMA][1][32] = 44, @@ -55655,6 +58808,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][32] = 6, [2][1][RTW89_UK][1][32] = 44, [2][1][RTW89_UK][0][32] = 6, + [2][1][RTW89_THAILAND][1][32] = 28, + [2][1][RTW89_THAILAND][0][32] = -16, [2][1][RTW89_FCC][1][34] = -16, [2][1][RTW89_FCC][2][34] = 54, [2][1][RTW89_ETSI][1][34] = 44, @@ -55662,6 +58817,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][34] = 40, [2][1][RTW89_MKK][0][34] = 2, [2][1][RTW89_IC][1][34] = -16, + [2][1][RTW89_IC][2][34] = 54, [2][1][RTW89_KCC][1][34] = -14, [2][1][RTW89_KCC][0][34] = -14, [2][1][RTW89_ACMA][1][34] = 44, @@ -55671,6 +58827,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][34] = 6, [2][1][RTW89_UK][1][34] = 44, [2][1][RTW89_UK][0][34] = 6, + [2][1][RTW89_THAILAND][1][34] = 28, + [2][1][RTW89_THAILAND][0][34] = -16, [2][1][RTW89_FCC][1][36] = -16, [2][1][RTW89_FCC][2][36] = 54, [2][1][RTW89_ETSI][1][36] = 44, @@ -55678,6 +58836,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][36] = 40, [2][1][RTW89_MKK][0][36] = 2, [2][1][RTW89_IC][1][36] = -16, + [2][1][RTW89_IC][2][36] = 54, [2][1][RTW89_KCC][1][36] = -14, [2][1][RTW89_KCC][0][36] = -14, [2][1][RTW89_ACMA][1][36] = 44, @@ -55687,6 +58846,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][36] = 6, [2][1][RTW89_UK][1][36] = 44, [2][1][RTW89_UK][0][36] = 6, + [2][1][RTW89_THAILAND][1][36] = 28, + [2][1][RTW89_THAILAND][0][36] = -16, [2][1][RTW89_FCC][1][38] = -16, [2][1][RTW89_FCC][2][38] = 54, [2][1][RTW89_ETSI][1][38] = 44, @@ -55694,6 +58855,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][38] = 40, 
[2][1][RTW89_MKK][0][38] = 2, [2][1][RTW89_IC][1][38] = -16, + [2][1][RTW89_IC][2][38] = 54, [2][1][RTW89_KCC][1][38] = -14, [2][1][RTW89_KCC][0][38] = -14, [2][1][RTW89_ACMA][1][38] = 44, @@ -55703,6 +58865,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][38] = 6, [2][1][RTW89_UK][1][38] = 44, [2][1][RTW89_UK][0][38] = 6, + [2][1][RTW89_THAILAND][1][38] = 28, + [2][1][RTW89_THAILAND][0][38] = -16, [2][1][RTW89_FCC][1][40] = -16, [2][1][RTW89_FCC][2][40] = 54, [2][1][RTW89_ETSI][1][40] = 44, @@ -55710,6 +58874,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][40] = 40, [2][1][RTW89_MKK][0][40] = 2, [2][1][RTW89_IC][1][40] = -16, + [2][1][RTW89_IC][2][40] = 54, [2][1][RTW89_KCC][1][40] = -14, [2][1][RTW89_KCC][0][40] = -14, [2][1][RTW89_ACMA][1][40] = 44, @@ -55719,6 +58884,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][40] = 6, [2][1][RTW89_UK][1][40] = 44, [2][1][RTW89_UK][0][40] = 6, + [2][1][RTW89_THAILAND][1][40] = 28, + [2][1][RTW89_THAILAND][0][40] = -16, [2][1][RTW89_FCC][1][42] = -16, [2][1][RTW89_FCC][2][42] = 54, [2][1][RTW89_ETSI][1][42] = 44, @@ -55726,6 +58893,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][42] = 40, [2][1][RTW89_MKK][0][42] = 2, [2][1][RTW89_IC][1][42] = -16, + [2][1][RTW89_IC][2][42] = 54, [2][1][RTW89_KCC][1][42] = -14, [2][1][RTW89_KCC][0][42] = -14, [2][1][RTW89_ACMA][1][42] = 44, @@ -55735,6 +58903,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][42] = 6, [2][1][RTW89_UK][1][42] = 44, [2][1][RTW89_UK][0][42] = 6, + [2][1][RTW89_THAILAND][1][42] = 28, + [2][1][RTW89_THAILAND][0][42] = -16, [2][1][RTW89_FCC][1][44] = -16, [2][1][RTW89_FCC][2][44] = 54, [2][1][RTW89_ETSI][1][44] = 44, @@ -55742,6 +58912,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][44] = 16, [2][1][RTW89_MKK][0][44] = 2, [2][1][RTW89_IC][1][44] = -16, + [2][1][RTW89_IC][2][44] = 54, [2][1][RTW89_KCC][1][44] = -14, [2][1][RTW89_KCC][0][44] = -14, [2][1][RTW89_ACMA][1][44] = 44, @@ -55751,6 +58922,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][44] = 6, [2][1][RTW89_UK][1][44] = 44, [2][1][RTW89_UK][0][44] = 6, + [2][1][RTW89_THAILAND][1][44] = 28, + [2][1][RTW89_THAILAND][0][44] = -16, [2][1][RTW89_FCC][1][45] = -16, [2][1][RTW89_FCC][2][45] = 127, [2][1][RTW89_ETSI][1][45] = 127, @@ -55758,6 +58931,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][45] = 127, [2][1][RTW89_MKK][0][45] = 127, [2][1][RTW89_IC][1][45] = -16, + [2][1][RTW89_IC][2][45] = 56, [2][1][RTW89_KCC][1][45] = -14, [2][1][RTW89_KCC][0][45] = 127, [2][1][RTW89_ACMA][1][45] = 127, @@ -55767,6 +58941,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][45] = 127, [2][1][RTW89_UK][1][45] = 127, [2][1][RTW89_UK][0][45] = 127, + [2][1][RTW89_THAILAND][1][45] = 127, + [2][1][RTW89_THAILAND][0][45] = 127, [2][1][RTW89_FCC][1][47] = -16, [2][1][RTW89_FCC][2][47] = 127, [2][1][RTW89_ETSI][1][47] = 127, @@ -55774,6 +58950,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][47] = 127, [2][1][RTW89_MKK][0][47] = 127, [2][1][RTW89_IC][1][47] = -16, + [2][1][RTW89_IC][2][47] = 56, [2][1][RTW89_KCC][1][47] = -14, [2][1][RTW89_KCC][0][47] = 127, [2][1][RTW89_ACMA][1][47] = 127, @@ -55783,6 +58960,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][47] = 127, [2][1][RTW89_UK][1][47] = 127, [2][1][RTW89_UK][0][47] = 127, + [2][1][RTW89_THAILAND][1][47] = 127, + [2][1][RTW89_THAILAND][0][47] = 127, [2][1][RTW89_FCC][1][49] = -16, [2][1][RTW89_FCC][2][49] = 127, [2][1][RTW89_ETSI][1][49] = 127, @@ -55790,6 +58969,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][49] = 127, [2][1][RTW89_MKK][0][49] = 127, [2][1][RTW89_IC][1][49] = -16, + [2][1][RTW89_IC][2][49] = 56, [2][1][RTW89_KCC][1][49] = -14, [2][1][RTW89_KCC][0][49] = 127, [2][1][RTW89_ACMA][1][49] = 127, @@ -55799,6 +58979,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][49] = 127, [2][1][RTW89_UK][1][49] = 127, [2][1][RTW89_UK][0][49] = 127, + [2][1][RTW89_THAILAND][1][49] = 127, + [2][1][RTW89_THAILAND][0][49] = 127, [2][1][RTW89_FCC][1][51] = -16, [2][1][RTW89_FCC][2][51] = 127, [2][1][RTW89_ETSI][1][51] = 127, @@ -55806,6 +58988,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][51] = 127, [2][1][RTW89_MKK][0][51] = 127, [2][1][RTW89_IC][1][51] = -16, + [2][1][RTW89_IC][2][51] = 56, [2][1][RTW89_KCC][1][51] = -14, [2][1][RTW89_KCC][0][51] = 127, [2][1][RTW89_ACMA][1][51] = 127, @@ -55815,6 +58998,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][51] = 127, [2][1][RTW89_UK][1][51] = 127, [2][1][RTW89_UK][0][51] = 127, + [2][1][RTW89_THAILAND][1][51] = 127, + [2][1][RTW89_THAILAND][0][51] = 127, [2][1][RTW89_FCC][1][53] = -16, [2][1][RTW89_FCC][2][53] = 127, [2][1][RTW89_ETSI][1][53] = 127, @@ -55822,6 +59007,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][53] = 127, [2][1][RTW89_MKK][0][53] = 127, [2][1][RTW89_IC][1][53] = -16, + [2][1][RTW89_IC][2][53] = 56, [2][1][RTW89_KCC][1][53] = -14, [2][1][RTW89_KCC][0][53] = 127, [2][1][RTW89_ACMA][1][53] = 127, @@ -55831,6 +59017,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][53] = 127, [2][1][RTW89_UK][1][53] = 127, [2][1][RTW89_UK][0][53] = 127, + [2][1][RTW89_THAILAND][1][53] = 127, + [2][1][RTW89_THAILAND][0][53] = 127, [2][1][RTW89_FCC][1][55] = -16, [2][1][RTW89_FCC][2][55] = 54, [2][1][RTW89_ETSI][1][55] = 127, @@ -55838,6 +59026,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][55] = 127, [2][1][RTW89_MKK][0][55] = 127, [2][1][RTW89_IC][1][55] = -16, + [2][1][RTW89_IC][2][55] = 54, [2][1][RTW89_KCC][1][55] = -14, [2][1][RTW89_KCC][0][55] = 127, [2][1][RTW89_ACMA][1][55] = 127, @@ -55847,6 +59036,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][55] = 127, [2][1][RTW89_UK][1][55] = 127, [2][1][RTW89_UK][0][55] = 127, + [2][1][RTW89_THAILAND][1][55] = 127, + [2][1][RTW89_THAILAND][0][55] = 127, [2][1][RTW89_FCC][1][57] = -16, [2][1][RTW89_FCC][2][57] = 54, [2][1][RTW89_ETSI][1][57] = 127, @@ -55854,6 +59045,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][57] = 127, [2][1][RTW89_MKK][0][57] = 127, [2][1][RTW89_IC][1][57] = -16, + [2][1][RTW89_IC][2][57] = 54, [2][1][RTW89_KCC][1][57] = -14, [2][1][RTW89_KCC][0][57] = 127, [2][1][RTW89_ACMA][1][57] = 127, @@ -55863,6 +59055,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][57] = 127, [2][1][RTW89_UK][1][57] = 127, [2][1][RTW89_UK][0][57] = 127, + 
[2][1][RTW89_THAILAND][1][57] = 127, + [2][1][RTW89_THAILAND][0][57] = 127, [2][1][RTW89_FCC][1][59] = -16, [2][1][RTW89_FCC][2][59] = 54, [2][1][RTW89_ETSI][1][59] = 127, @@ -55870,6 +59064,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][59] = 127, [2][1][RTW89_MKK][0][59] = 127, [2][1][RTW89_IC][1][59] = -16, + [2][1][RTW89_IC][2][59] = 54, [2][1][RTW89_KCC][1][59] = -14, [2][1][RTW89_KCC][0][59] = 127, [2][1][RTW89_ACMA][1][59] = 127, @@ -55879,6 +59074,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][59] = 127, [2][1][RTW89_UK][1][59] = 127, [2][1][RTW89_UK][0][59] = 127, + [2][1][RTW89_THAILAND][1][59] = 127, + [2][1][RTW89_THAILAND][0][59] = 127, [2][1][RTW89_FCC][1][60] = -16, [2][1][RTW89_FCC][2][60] = 54, [2][1][RTW89_ETSI][1][60] = 127, @@ -55886,6 +59083,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][60] = 127, [2][1][RTW89_MKK][0][60] = 127, [2][1][RTW89_IC][1][60] = -16, + [2][1][RTW89_IC][2][60] = 54, [2][1][RTW89_KCC][1][60] = -14, [2][1][RTW89_KCC][0][60] = 127, [2][1][RTW89_ACMA][1][60] = 127, @@ -55895,6 +59093,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][60] = 127, [2][1][RTW89_UK][1][60] = 127, [2][1][RTW89_UK][0][60] = 127, + [2][1][RTW89_THAILAND][1][60] = 127, + [2][1][RTW89_THAILAND][0][60] = 127, [2][1][RTW89_FCC][1][62] = -16, [2][1][RTW89_FCC][2][62] = 54, [2][1][RTW89_ETSI][1][62] = 127, @@ -55902,6 +59102,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][62] = 127, [2][1][RTW89_MKK][0][62] = 127, [2][1][RTW89_IC][1][62] = -16, + [2][1][RTW89_IC][2][62] = 54, [2][1][RTW89_KCC][1][62] = -14, [2][1][RTW89_KCC][0][62] = 127, [2][1][RTW89_ACMA][1][62] = 127, @@ -55911,6 +59112,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][62] = 127, [2][1][RTW89_UK][1][62] = 127, [2][1][RTW89_UK][0][62] = 127, + [2][1][RTW89_THAILAND][1][62] = 127, + [2][1][RTW89_THAILAND][0][62] = 127, [2][1][RTW89_FCC][1][64] = -16, [2][1][RTW89_FCC][2][64] = 54, [2][1][RTW89_ETSI][1][64] = 127, @@ -55918,6 +59121,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][64] = 127, [2][1][RTW89_MKK][0][64] = 127, [2][1][RTW89_IC][1][64] = -16, + [2][1][RTW89_IC][2][64] = 54, [2][1][RTW89_KCC][1][64] = -14, [2][1][RTW89_KCC][0][64] = 127, [2][1][RTW89_ACMA][1][64] = 127, @@ -55927,6 +59131,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][64] = 127, [2][1][RTW89_UK][1][64] = 127, [2][1][RTW89_UK][0][64] = 127, + [2][1][RTW89_THAILAND][1][64] = 127, + [2][1][RTW89_THAILAND][0][64] = 127, [2][1][RTW89_FCC][1][66] = -16, [2][1][RTW89_FCC][2][66] = 54, [2][1][RTW89_ETSI][1][66] = 127, @@ -55934,6 +59140,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][66] = 127, [2][1][RTW89_MKK][0][66] = 127, [2][1][RTW89_IC][1][66] = -16, + [2][1][RTW89_IC][2][66] = 54, [2][1][RTW89_KCC][1][66] = -14, [2][1][RTW89_KCC][0][66] = 127, [2][1][RTW89_ACMA][1][66] = 127, @@ -55943,6 +59150,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][66] = 127, [2][1][RTW89_UK][1][66] = 127, [2][1][RTW89_UK][0][66] = 127, + [2][1][RTW89_THAILAND][1][66] = 127, + [2][1][RTW89_THAILAND][0][66] = 127, [2][1][RTW89_FCC][1][68] = -16, [2][1][RTW89_FCC][2][68] = 54, [2][1][RTW89_ETSI][1][68] = 127, @@ -55950,6 
+59159,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][68] = 127, [2][1][RTW89_MKK][0][68] = 127, [2][1][RTW89_IC][1][68] = -16, + [2][1][RTW89_IC][2][68] = 54, [2][1][RTW89_KCC][1][68] = -14, [2][1][RTW89_KCC][0][68] = 127, [2][1][RTW89_ACMA][1][68] = 127, @@ -55959,6 +59169,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][68] = 127, [2][1][RTW89_UK][1][68] = 127, [2][1][RTW89_UK][0][68] = 127, + [2][1][RTW89_THAILAND][1][68] = 127, + [2][1][RTW89_THAILAND][0][68] = 127, [2][1][RTW89_FCC][1][70] = -16, [2][1][RTW89_FCC][2][70] = 56, [2][1][RTW89_ETSI][1][70] = 127, @@ -55966,6 +59178,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][70] = 127, [2][1][RTW89_MKK][0][70] = 127, [2][1][RTW89_IC][1][70] = -16, + [2][1][RTW89_IC][2][70] = 56, [2][1][RTW89_KCC][1][70] = -14, [2][1][RTW89_KCC][0][70] = 127, [2][1][RTW89_ACMA][1][70] = 127, @@ -55975,6 +59188,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][70] = 127, [2][1][RTW89_UK][1][70] = 127, [2][1][RTW89_UK][0][70] = 127, + [2][1][RTW89_THAILAND][1][70] = 127, + [2][1][RTW89_THAILAND][0][70] = 127, [2][1][RTW89_FCC][1][72] = -16, [2][1][RTW89_FCC][2][72] = 56, [2][1][RTW89_ETSI][1][72] = 127, @@ -55982,6 +59197,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][72] = 127, [2][1][RTW89_MKK][0][72] = 127, [2][1][RTW89_IC][1][72] = -16, + [2][1][RTW89_IC][2][72] = 56, [2][1][RTW89_KCC][1][72] = -14, [2][1][RTW89_KCC][0][72] = 127, [2][1][RTW89_ACMA][1][72] = 127, @@ -55991,6 +59207,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][72] = 127, [2][1][RTW89_UK][1][72] = 127, [2][1][RTW89_UK][0][72] = 127, + [2][1][RTW89_THAILAND][1][72] = 127, + [2][1][RTW89_THAILAND][0][72] = 127, [2][1][RTW89_FCC][1][74] = -16, [2][1][RTW89_FCC][2][74] = 56, [2][1][RTW89_ETSI][1][74] = 127, @@ -55998,6 +59216,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][74] = 127, [2][1][RTW89_MKK][0][74] = 127, [2][1][RTW89_IC][1][74] = -16, + [2][1][RTW89_IC][2][74] = 56, [2][1][RTW89_KCC][1][74] = -14, [2][1][RTW89_KCC][0][74] = 127, [2][1][RTW89_ACMA][1][74] = 127, @@ -56007,6 +59226,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][74] = 127, [2][1][RTW89_UK][1][74] = 127, [2][1][RTW89_UK][0][74] = 127, + [2][1][RTW89_THAILAND][1][74] = 127, + [2][1][RTW89_THAILAND][0][74] = 127, [2][1][RTW89_FCC][1][75] = -16, [2][1][RTW89_FCC][2][75] = 56, [2][1][RTW89_ETSI][1][75] = 127, @@ -56014,6 +59235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][75] = 127, [2][1][RTW89_MKK][0][75] = 127, [2][1][RTW89_IC][1][75] = -16, + [2][1][RTW89_IC][2][75] = 56, [2][1][RTW89_KCC][1][75] = -14, [2][1][RTW89_KCC][0][75] = 127, [2][1][RTW89_ACMA][1][75] = 127, @@ -56023,6 +59245,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][75] = 127, [2][1][RTW89_UK][1][75] = 127, [2][1][RTW89_UK][0][75] = 127, + [2][1][RTW89_THAILAND][1][75] = 127, + [2][1][RTW89_THAILAND][0][75] = 127, [2][1][RTW89_FCC][1][77] = -16, [2][1][RTW89_FCC][2][77] = 56, [2][1][RTW89_ETSI][1][77] = 127, @@ -56030,6 +59254,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][77] = 127, [2][1][RTW89_MKK][0][77] = 127, [2][1][RTW89_IC][1][77] = -16, + 
[2][1][RTW89_IC][2][77] = 56, [2][1][RTW89_KCC][1][77] = -14, [2][1][RTW89_KCC][0][77] = 127, [2][1][RTW89_ACMA][1][77] = 127, @@ -56039,6 +59264,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][77] = 127, [2][1][RTW89_UK][1][77] = 127, [2][1][RTW89_UK][0][77] = 127, + [2][1][RTW89_THAILAND][1][77] = 127, + [2][1][RTW89_THAILAND][0][77] = 127, [2][1][RTW89_FCC][1][79] = -16, [2][1][RTW89_FCC][2][79] = 56, [2][1][RTW89_ETSI][1][79] = 127, @@ -56046,6 +59273,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][79] = 127, [2][1][RTW89_MKK][0][79] = 127, [2][1][RTW89_IC][1][79] = -16, + [2][1][RTW89_IC][2][79] = 56, [2][1][RTW89_KCC][1][79] = -14, [2][1][RTW89_KCC][0][79] = 127, [2][1][RTW89_ACMA][1][79] = 127, @@ -56055,6 +59283,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][79] = 127, [2][1][RTW89_UK][1][79] = 127, [2][1][RTW89_UK][0][79] = 127, + [2][1][RTW89_THAILAND][1][79] = 127, + [2][1][RTW89_THAILAND][0][79] = 127, [2][1][RTW89_FCC][1][81] = -16, [2][1][RTW89_FCC][2][81] = 56, [2][1][RTW89_ETSI][1][81] = 127, @@ -56062,6 +59292,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][81] = 127, [2][1][RTW89_MKK][0][81] = 127, [2][1][RTW89_IC][1][81] = -16, + [2][1][RTW89_IC][2][81] = 56, [2][1][RTW89_KCC][1][81] = -14, [2][1][RTW89_KCC][0][81] = 127, [2][1][RTW89_ACMA][1][81] = 127, @@ -56071,6 +59302,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][81] = 127, [2][1][RTW89_UK][1][81] = 127, [2][1][RTW89_UK][0][81] = 127, + [2][1][RTW89_THAILAND][1][81] = 127, + [2][1][RTW89_THAILAND][0][81] = 127, [2][1][RTW89_FCC][1][83] = -16, [2][1][RTW89_FCC][2][83] = 56, [2][1][RTW89_ETSI][1][83] = 127, @@ -56078,6 +59311,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][83] = 127, [2][1][RTW89_MKK][0][83] = 127, [2][1][RTW89_IC][1][83] = -16, + [2][1][RTW89_IC][2][83] = 56, [2][1][RTW89_KCC][1][83] = -14, [2][1][RTW89_KCC][0][83] = 127, [2][1][RTW89_ACMA][1][83] = 127, @@ -56087,6 +59321,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][83] = 127, [2][1][RTW89_UK][1][83] = 127, [2][1][RTW89_UK][0][83] = 127, + [2][1][RTW89_THAILAND][1][83] = 127, + [2][1][RTW89_THAILAND][0][83] = 127, [2][1][RTW89_FCC][1][85] = -18, [2][1][RTW89_FCC][2][85] = 56, [2][1][RTW89_ETSI][1][85] = 127, @@ -56094,6 +59330,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][85] = 127, [2][1][RTW89_MKK][0][85] = 127, [2][1][RTW89_IC][1][85] = -18, + [2][1][RTW89_IC][2][85] = 56, [2][1][RTW89_KCC][1][85] = -14, [2][1][RTW89_KCC][0][85] = 127, [2][1][RTW89_ACMA][1][85] = 127, @@ -56103,6 +59340,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][85] = 127, [2][1][RTW89_UK][1][85] = 127, [2][1][RTW89_UK][0][85] = 127, + [2][1][RTW89_THAILAND][1][85] = 127, + [2][1][RTW89_THAILAND][0][85] = 127, [2][1][RTW89_FCC][1][87] = -16, [2][1][RTW89_FCC][2][87] = 127, [2][1][RTW89_ETSI][1][87] = 127, @@ -56110,6 +59349,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][87] = 127, [2][1][RTW89_MKK][0][87] = 127, [2][1][RTW89_IC][1][87] = -16, + [2][1][RTW89_IC][2][87] = 127, [2][1][RTW89_KCC][1][87] = -14, [2][1][RTW89_KCC][0][87] = 127, [2][1][RTW89_ACMA][1][87] = 127, @@ -56119,6 +59359,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][87] = 127, [2][1][RTW89_UK][1][87] = 127, [2][1][RTW89_UK][0][87] = 127, + [2][1][RTW89_THAILAND][1][87] = 127, + [2][1][RTW89_THAILAND][0][87] = 127, [2][1][RTW89_FCC][1][89] = -16, [2][1][RTW89_FCC][2][89] = 127, [2][1][RTW89_ETSI][1][89] = 127, @@ -56126,6 +59368,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][89] = 127, [2][1][RTW89_MKK][0][89] = 127, [2][1][RTW89_IC][1][89] = -16, + [2][1][RTW89_IC][2][89] = 127, [2][1][RTW89_KCC][1][89] = -14, [2][1][RTW89_KCC][0][89] = 127, [2][1][RTW89_ACMA][1][89] = 127, @@ -56135,6 +59378,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][89] = 127, [2][1][RTW89_UK][1][89] = 127, [2][1][RTW89_UK][0][89] = 127, + [2][1][RTW89_THAILAND][1][89] = 127, + [2][1][RTW89_THAILAND][0][89] = 127, [2][1][RTW89_FCC][1][90] = -16, [2][1][RTW89_FCC][2][90] = 127, [2][1][RTW89_ETSI][1][90] = 127, @@ -56142,6 +59387,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][90] = 127, [2][1][RTW89_MKK][0][90] = 127, [2][1][RTW89_IC][1][90] = -16, + [2][1][RTW89_IC][2][90] = 127, [2][1][RTW89_KCC][1][90] = -14, [2][1][RTW89_KCC][0][90] = 127, [2][1][RTW89_ACMA][1][90] = 127, @@ -56151,6 +59397,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][90] = 127, [2][1][RTW89_UK][1][90] = 127, [2][1][RTW89_UK][0][90] = 127, + [2][1][RTW89_THAILAND][1][90] = 127, + [2][1][RTW89_THAILAND][0][90] = 127, [2][1][RTW89_FCC][1][92] = -16, [2][1][RTW89_FCC][2][92] = 127, [2][1][RTW89_ETSI][1][92] = 127, @@ -56158,6 +59406,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][92] = 127, [2][1][RTW89_MKK][0][92] = 127, [2][1][RTW89_IC][1][92] = -16, + [2][1][RTW89_IC][2][92] = 127, [2][1][RTW89_KCC][1][92] = -14, [2][1][RTW89_KCC][0][92] = 127, [2][1][RTW89_ACMA][1][92] = 127, @@ -56167,6 +59416,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][92] = 127, [2][1][RTW89_UK][1][92] = 127, [2][1][RTW89_UK][0][92] = 127, + [2][1][RTW89_THAILAND][1][92] = 127, + [2][1][RTW89_THAILAND][0][92] = 127, [2][1][RTW89_FCC][1][94] = -16, [2][1][RTW89_FCC][2][94] = 127, [2][1][RTW89_ETSI][1][94] = 127, @@ -56174,6 +59425,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][94] = 127, [2][1][RTW89_MKK][0][94] = 127, [2][1][RTW89_IC][1][94] = -16, + [2][1][RTW89_IC][2][94] = 127, [2][1][RTW89_KCC][1][94] = -14, [2][1][RTW89_KCC][0][94] = 127, [2][1][RTW89_ACMA][1][94] = 127, @@ -56183,6 +59435,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][94] = 127, [2][1][RTW89_UK][1][94] = 127, [2][1][RTW89_UK][0][94] = 127, + [2][1][RTW89_THAILAND][1][94] = 127, + [2][1][RTW89_THAILAND][0][94] = 127, [2][1][RTW89_FCC][1][96] = -16, [2][1][RTW89_FCC][2][96] = 127, [2][1][RTW89_ETSI][1][96] = 127, @@ -56190,6 +59444,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][96] = 127, [2][1][RTW89_MKK][0][96] = 127, [2][1][RTW89_IC][1][96] = -16, + [2][1][RTW89_IC][2][96] = 127, [2][1][RTW89_KCC][1][96] = -14, [2][1][RTW89_KCC][0][96] = 127, [2][1][RTW89_ACMA][1][96] = 127, @@ -56199,6 +59454,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][96] = 127, [2][1][RTW89_UK][1][96] = 127, [2][1][RTW89_UK][0][96] = 127, + 
[2][1][RTW89_THAILAND][1][96] = 127, + [2][1][RTW89_THAILAND][0][96] = 127, [2][1][RTW89_FCC][1][98] = -16, [2][1][RTW89_FCC][2][98] = 127, [2][1][RTW89_ETSI][1][98] = 127, @@ -56206,6 +59463,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][98] = 127, [2][1][RTW89_MKK][0][98] = 127, [2][1][RTW89_IC][1][98] = -16, + [2][1][RTW89_IC][2][98] = 127, [2][1][RTW89_KCC][1][98] = -14, [2][1][RTW89_KCC][0][98] = 127, [2][1][RTW89_ACMA][1][98] = 127, @@ -56215,6 +59473,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][98] = 127, [2][1][RTW89_UK][1][98] = 127, [2][1][RTW89_UK][0][98] = 127, + [2][1][RTW89_THAILAND][1][98] = 127, + [2][1][RTW89_THAILAND][0][98] = 127, [2][1][RTW89_FCC][1][100] = -16, [2][1][RTW89_FCC][2][100] = 127, [2][1][RTW89_ETSI][1][100] = 127, @@ -56222,6 +59482,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][100] = 127, [2][1][RTW89_MKK][0][100] = 127, [2][1][RTW89_IC][1][100] = -16, + [2][1][RTW89_IC][2][100] = 127, [2][1][RTW89_KCC][1][100] = -14, [2][1][RTW89_KCC][0][100] = 127, [2][1][RTW89_ACMA][1][100] = 127, @@ -56231,6 +59492,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][100] = 127, [2][1][RTW89_UK][1][100] = 127, [2][1][RTW89_UK][0][100] = 127, + [2][1][RTW89_THAILAND][1][100] = 127, + [2][1][RTW89_THAILAND][0][100] = 127, [2][1][RTW89_FCC][1][102] = -16, [2][1][RTW89_FCC][2][102] = 127, [2][1][RTW89_ETSI][1][102] = 127, @@ -56238,6 +59501,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][102] = 127, [2][1][RTW89_MKK][0][102] = 127, [2][1][RTW89_IC][1][102] = -16, + [2][1][RTW89_IC][2][102] = 127, [2][1][RTW89_KCC][1][102] = -14, [2][1][RTW89_KCC][0][102] = 127, [2][1][RTW89_ACMA][1][102] = 127, @@ -56247,6 +59511,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][102] = 127, [2][1][RTW89_UK][1][102] = 127, [2][1][RTW89_UK][0][102] = 127, + [2][1][RTW89_THAILAND][1][102] = 127, + [2][1][RTW89_THAILAND][0][102] = 127, [2][1][RTW89_FCC][1][104] = -16, [2][1][RTW89_FCC][2][104] = 127, [2][1][RTW89_ETSI][1][104] = 127, @@ -56254,6 +59520,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][104] = 127, [2][1][RTW89_MKK][0][104] = 127, [2][1][RTW89_IC][1][104] = -16, + [2][1][RTW89_IC][2][104] = 127, [2][1][RTW89_KCC][1][104] = -14, [2][1][RTW89_KCC][0][104] = 127, [2][1][RTW89_ACMA][1][104] = 127, @@ -56263,6 +59530,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][104] = 127, [2][1][RTW89_UK][1][104] = 127, [2][1][RTW89_UK][0][104] = 127, + [2][1][RTW89_THAILAND][1][104] = 127, + [2][1][RTW89_THAILAND][0][104] = 127, [2][1][RTW89_FCC][1][105] = -16, [2][1][RTW89_FCC][2][105] = 127, [2][1][RTW89_ETSI][1][105] = 127, @@ -56270,6 +59539,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][105] = 127, [2][1][RTW89_MKK][0][105] = 127, [2][1][RTW89_IC][1][105] = -16, + [2][1][RTW89_IC][2][105] = 127, [2][1][RTW89_KCC][1][105] = -14, [2][1][RTW89_KCC][0][105] = 127, [2][1][RTW89_ACMA][1][105] = 127, @@ -56279,6 +59549,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][105] = 127, [2][1][RTW89_UK][1][105] = 127, [2][1][RTW89_UK][0][105] = 127, + [2][1][RTW89_THAILAND][1][105] = 127, + [2][1][RTW89_THAILAND][0][105] = 127, [2][1][RTW89_FCC][1][107] = -12, 
[2][1][RTW89_FCC][2][107] = 127, [2][1][RTW89_ETSI][1][107] = 127, @@ -56286,6 +59558,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][107] = 127, [2][1][RTW89_MKK][0][107] = 127, [2][1][RTW89_IC][1][107] = -12, + [2][1][RTW89_IC][2][107] = 127, [2][1][RTW89_KCC][1][107] = -14, [2][1][RTW89_KCC][0][107] = 127, [2][1][RTW89_ACMA][1][107] = 127, @@ -56295,6 +59568,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][107] = 127, [2][1][RTW89_UK][1][107] = 127, [2][1][RTW89_UK][0][107] = 127, + [2][1][RTW89_THAILAND][1][107] = 127, + [2][1][RTW89_THAILAND][0][107] = 127, [2][1][RTW89_FCC][1][109] = -10, [2][1][RTW89_FCC][2][109] = 127, [2][1][RTW89_ETSI][1][109] = 127, @@ -56302,6 +59577,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][109] = 127, [2][1][RTW89_MKK][0][109] = 127, [2][1][RTW89_IC][1][109] = -10, + [2][1][RTW89_IC][2][109] = 127, [2][1][RTW89_KCC][1][109] = 127, [2][1][RTW89_KCC][0][109] = 127, [2][1][RTW89_ACMA][1][109] = 127, @@ -56311,6 +59587,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][109] = 127, [2][1][RTW89_UK][1][109] = 127, [2][1][RTW89_UK][0][109] = 127, + [2][1][RTW89_THAILAND][1][109] = 127, + [2][1][RTW89_THAILAND][0][109] = 127, [2][1][RTW89_FCC][1][111] = 127, [2][1][RTW89_FCC][2][111] = 127, [2][1][RTW89_ETSI][1][111] = 127, @@ -56318,6 +59596,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][111] = 127, [2][1][RTW89_MKK][0][111] = 127, [2][1][RTW89_IC][1][111] = 127, + [2][1][RTW89_IC][2][111] = 127, [2][1][RTW89_KCC][1][111] = 127, [2][1][RTW89_KCC][0][111] = 127, [2][1][RTW89_ACMA][1][111] = 127, @@ -56327,6 +59606,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][111] = 127, [2][1][RTW89_UK][1][111] = 127, [2][1][RTW89_UK][0][111] = 127, + [2][1][RTW89_THAILAND][1][111] = 127, + [2][1][RTW89_THAILAND][0][111] = 127, [2][1][RTW89_FCC][1][113] = 127, [2][1][RTW89_FCC][2][113] = 127, [2][1][RTW89_ETSI][1][113] = 127, @@ -56334,6 +59615,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][113] = 127, [2][1][RTW89_MKK][0][113] = 127, [2][1][RTW89_IC][1][113] = 127, + [2][1][RTW89_IC][2][113] = 127, [2][1][RTW89_KCC][1][113] = 127, [2][1][RTW89_KCC][0][113] = 127, [2][1][RTW89_ACMA][1][113] = 127, @@ -56343,6 +59625,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][113] = 127, [2][1][RTW89_UK][1][113] = 127, [2][1][RTW89_UK][0][113] = 127, + [2][1][RTW89_THAILAND][1][113] = 127, + [2][1][RTW89_THAILAND][0][113] = 127, [2][1][RTW89_FCC][1][115] = 127, [2][1][RTW89_FCC][2][115] = 127, [2][1][RTW89_ETSI][1][115] = 127, @@ -56350,6 +59634,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][115] = 127, [2][1][RTW89_MKK][0][115] = 127, [2][1][RTW89_IC][1][115] = 127, + [2][1][RTW89_IC][2][115] = 127, [2][1][RTW89_KCC][1][115] = 127, [2][1][RTW89_KCC][0][115] = 127, [2][1][RTW89_ACMA][1][115] = 127, @@ -56359,6 +59644,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][115] = 127, [2][1][RTW89_UK][1][115] = 127, [2][1][RTW89_UK][0][115] = 127, + [2][1][RTW89_THAILAND][1][115] = 127, + [2][1][RTW89_THAILAND][0][115] = 127, [2][1][RTW89_FCC][1][117] = 127, [2][1][RTW89_FCC][2][117] = 127, [2][1][RTW89_ETSI][1][117] = 127, @@ -56366,6 +59653,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][117] = 127, [2][1][RTW89_MKK][0][117] = 127, [2][1][RTW89_IC][1][117] = 127, + [2][1][RTW89_IC][2][117] = 127, [2][1][RTW89_KCC][1][117] = 127, [2][1][RTW89_KCC][0][117] = 127, [2][1][RTW89_ACMA][1][117] = 127, @@ -56375,6 +59663,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][117] = 127, [2][1][RTW89_UK][1][117] = 127, [2][1][RTW89_UK][0][117] = 127, + [2][1][RTW89_THAILAND][1][117] = 127, + [2][1][RTW89_THAILAND][0][117] = 127, [2][1][RTW89_FCC][1][119] = 127, [2][1][RTW89_FCC][2][119] = 127, [2][1][RTW89_ETSI][1][119] = 127, @@ -56382,6 +59672,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][119] = 127, [2][1][RTW89_MKK][0][119] = 127, [2][1][RTW89_IC][1][119] = 127, + [2][1][RTW89_IC][2][119] = 127, [2][1][RTW89_KCC][1][119] = 127, [2][1][RTW89_KCC][0][119] = 127, [2][1][RTW89_ACMA][1][119] = 127, @@ -56391,6 +59682,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][119] = 127, [2][1][RTW89_UK][1][119] = 127, [2][1][RTW89_UK][0][119] = 127, + [2][1][RTW89_THAILAND][1][119] = 127, + [2][1][RTW89_THAILAND][0][119] = 127, }; const struct rtw89_phy_table rtw89_8852c_phy_bb_table = { @@ -56425,6 +59718,7 @@ const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8852c_byr_table = { .data = rtw89_8852c_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate), @@ -56452,12 +59746,16 @@ const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = { const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = { .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_LOW] = {0x1d, 0x1d, 0x1d, 0x2f, 0xf, 0xf, 0x2f, 0x38, + 0x28, 0x18, 0x8, 0x8, 0x18, 0x28, 0x38}, + .data[RTW89_TSSI_BANDEDGE_MID] = {0x24, 0x24, 0x24, 0x3b, 0x13, 0x13, 0x3b, 0x46, + 0x32, 0x1e, 0xa, 0xa, 0x1e, 0x32, 0x46}, + .data[RTW89_TSSI_BANDEDGE_HIGH] = {0x2a, 0x2a, 0x2a, 0x46, 0x17, 0x17, 0x46, 0x53, + 0x3b, 0x24, 0xc, 0xc, 0x24, 0x3b, 0x53}, }; const struct rtw89_rfe_parms rtw89_8852c_dflt_parms = { + .byr_tbl = &rtw89_8852c_byr_table, .rule_2ghz = { .lmt = &rtw89_8852c_txpwr_lmt_2g, .lmt_ru = &rtw89_8852c_txpwr_lmt_ru_2g, @@ -56470,4 +59768,8 @@ const struct rtw89_rfe_parms rtw89_8852c_dflt_parms = { .lmt = &rtw89_8852c_txpwr_lmt_6g, .lmt_ru = &rtw89_8852c_txpwr_lmt_ru_6g, }, + .tx_shape = { + .lmt = &rtw89_8852c_tx_shape_lmt, + .lmt_ru = &rtw89_8852c_tx_shape_lmt_ru, + }, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h index 3eb0c49951..7c9f3ecdc4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -12,11 +12,8 @@ extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; extern const struct rtw89_phy_tssi_dbw_table 
rtw89_8852c_tssi_dbw_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg; -extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8852c_dflt_parms; #endif diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 02cff0f7d8..7142cce167 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -137,6 +137,181 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate) /* TX WD INFO DWORD 5 */ +/* TX WD BODY DWORD 0 */ +#define BE_TXD_BODY0_EN_HWSEQ_MODE GENMASK(1, 0) +#define BE_TXD_BODY0_HW_SSN_SEL GENMASK(4, 2) +#define BE_TXD_BODY0_HWAMSDU BIT(5) +#define BE_TXD_BODY0_HW_SEC_IV BIT(6) +#define BE_TXD_BODY0_WD_PAGE BIT(7) +#define BE_TXD_BODY0_CHK_EN BIT(8) +#define BE_TXD_BODY0_WP_INT BIT(9) +#define BE_TXD_BODY0_STF_MODE BIT(10) +#define BE_TXD_BODY0_HDR_LLC_LEN GENMASK(15, 11) +#define BE_TXD_BODY0_CH_DMA GENMASK(19, 16) +#define BE_TXD_BODY0_SMH_EN BIT(20) +#define BE_TXD_BODY0_PKT_OFFSET BIT(21) +#define BE_TXD_BODY0_WDINFO_EN BIT(22) +#define BE_TXD_BODY0_MOREDATA BIT(23) +#define BE_TXD_BODY0_WP_OFFSET_V1 GENMASK(27, 24) +#define BE_TXD_BODY0_AZ_FTM_SEC_V1 BIT(28) +#define BE_TXD_BODY0_WD_SOURCE GENMASK(30, 29) +#define BE_TXD_BODY0_HCI_SEQNUM_MODE BIT(31) + +/* TX WD BODY DWORD 1 */ +#define BE_TXD_BODY1_DMA_TXAGG_NUM GENMASK(6, 0) +#define BE_TXD_BODY1_REUSE_NUM GENMASK(11, 7) +#define BE_TXD_BODY1_SEC_TYPE GENMASK(15, 12) +#define BE_TXD_BODY1_SEC_KEYID GENMASK(17, 16) +#define BE_TXD_BODY1_SW_SEC_IV BIT(18) +#define BE_TXD_BODY1_REUSE_SIZE GENMASK(23, 20) +#define BE_TXD_BODY1_REUSE_START_OFFSET GENMASK(25, 24) +#define BE_TXD_BODY1_ADDR_INFO_NUM GENMASK(31, 26) + +/* TX WD BODY DWORD 2 */ +#define BE_TXD_BODY2_TXPKTSIZE GENMASK(13, 0) +#define BE_TXD_BODY2_AGG_EN BIT(14) +#define BE_TXD_BODY2_BK BIT(15) +#define BE_TXD_BODY2_MACID_EXTEND BIT(16) +#define BE_TXD_BODY2_QSEL GENMASK(22, 17) +#define BE_TXD_BODY2_TID_IND BIT(23) +#define BE_TXD_BODY2_MACID GENMASK(31, 24) + +/* TX WD BODY DWORD 3 */ +#define BE_TXD_BODY3_WIFI_SEQ GENMASK(11, 0) +#define BE_TXD_BODY3_MLO_FLAG BIT(12) +#define BE_TXD_BODY3_IS_MLD_SW_EN BIT(13) +#define BE_TXD_BODY3_TRY_RATE BIT(14) +#define BE_TXD_BODY3_RELINK_FLAG_V1 BIT(15) +#define BE_TXD_BODY3_BAND0_SU_TC_V1 GENMASK(21, 16) +#define BE_TXD_BODY3_TOTAL_TC GENMASK(27, 22) +#define BE_TXD_BODY3_RU_RTY BIT(28) +#define BE_TXD_BODY3_MU_PRI_RTY BIT(29) +#define BE_TXD_BODY3_MU_2ND_RTY BIT(30) +#define BE_TXD_BODY3_BAND1_SU_RTY_V1 BIT(31) + +/* TX WD BODY DWORD 4 */ +#define BE_TXD_BODY4_TXDESC_CHECKSUM GENMASK(15, 0) +#define BE_TXD_BODY4_SEC_IV_L0 GENMASK(23, 16) +#define BE_TXD_BODY4_SEC_IV_L1 GENMASK(31, 24) + +/* TX WD BODY DWORD 5 */ +#define BE_TXD_BODY5_SEC_IV_H2 GENMASK(7, 0) +#define BE_TXD_BODY5_SEC_IV_H3 GENMASK(15, 8) +#define BE_TXD_BODY5_SEC_IV_H4 GENMASK(23, 16) +#define BE_TXD_BODY5_SEC_IV_H5 GENMASK(31, 24) + +/* TX WD BODY DWORD 6 */ +#define BE_TXD_BODY6_MU_TC GENMASK(4, 0) +#define BE_TXD_BODY6_RU_TC GENMASK(9, 5) +#define BE_TXD_BODY6_PS160 BIT(10) +#define BE_TXD_BODY6_BMC BIT(11) +#define BE_TXD_BODY6_NO_ACK BIT(12) +#define BE_TXD_BODY6_UPD_WLAN_HDR BIT(13) +#define BE_TXD_BODY6_A4_HDR BIT(14) +#define BE_TXD_BODY6_EOSP_BIT BIT(15) +#define BE_TXD_BODY6_S_IDX GENMASK(23, 16) +#define BE_TXD_BODY6_RU_POS GENMASK(31, 24) + +/* TX WD BODY DWORD 7 */ +#define BE_TXD_BODY7_RTS_TC GENMASK(5, 0) +#define BE_TXD_BODY7_MSDU_NUM 
GENMASK(9, 6) +#define BE_TXD_BODY7_DATA_ER BIT(10) +#define BE_TXD_BODY7_DATA_BW_ER BIT(11) +#define BE_TXD_BODY7_DATA_DCM BIT(12) +#define BE_TXD_BODY7_GI_LTF GENMASK(15, 13) +#define BE_TXD_BODY7_DATARATE GENMASK(27, 16) +#define BE_TXD_BODY7_DATA_BW GENMASK(30, 28) +#define BE_TXD_BODY7_USERATE_SEL BIT(31) + +/* TX WD INFO DWORD 0 */ +#define BE_TXD_INFO0_MBSSID GENMASK(3, 0) +#define BE_TXD_INFO0_MULTIPORT_ID GENMASK(6, 4) +#define BE_TXD_INFO0_DISRTSFB BIT(9) +#define BE_TXD_INFO0_DISDATAFB BIT(10) +#define BE_TXD_INFO0_DATA_LDPC BIT(11) +#define BE_TXD_INFO0_DATA_STBC BIT(12) +#define BE_TXD_INFO0_DATA_TXCNT_LMT GENMASK(21, 16) +#define BE_TXD_INFO0_DATA_TXCNT_LMT_SEL BIT(22) +#define BE_TXD_INFO0_RESP_PHYSTS_CSI_EN_V1 BIT(23) +#define BE_TXD_INFO0_RLS_TO_CPUIO BIT(30) +#define BE_TXD_INFO0_ACK_CH_INFO BIT(31) + +/* TX WD INFO DWORD 1 */ +#define BE_TXD_INFO1_MAX_AGG_NUM GENMASK(7, 0) +#define BE_TXD_INFO1_BCN_SRCH_SEQ GENMASK(9, 8) +#define BE_TXD_INFO1_NAVUSEHDR BIT(10) +#define BE_TXD_INFO1_A_CTRL_BQR BIT(12) +#define BE_TXD_INFO1_A_CTRL_BSR BIT(14) +#define BE_TXD_INFO1_A_CTRL_CAS BIT(15) +#define BE_TXD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(27, 16) +#define BE_TXD_INFO1_SW_DEFINE GENMASK(31, 28) + +/* TX WD INFO DWORD 2 */ +#define BE_TXD_INFO2_SEC_CAM_IDX GENMASK(7, 0) +#define BE_TXD_INFO2_FORCE_KEY_EN BIT(8) +#define BE_TXD_INFO2_LIFETIME_SEL GENMASK(15, 13) +#define BE_TXD_INFO2_FORCE_TXOP BIT(17) +#define BE_TXD_INFO2_AMPDU_DENSITY GENMASK(20, 18) +#define BE_TXD_INFO2_LSIG_TXOP_EN BIT(21) +#define BE_TXD_INFO2_OBW_CTS2SELF_DUP_TYPE GENMASK(29, 26) +#define BE_TXD_INFO2_SPE_RPT_V1 BIT(30) +#define BE_TXD_INFO2_SIFS_TX_V1 BIT(31) + +/* TX WD INFO DWORD 3 */ +#define BE_TXD_INFO3_SPE_PKT GENMASK(3, 0) +#define BE_TXD_INFO3_SPE_PKT_TYPE GENMASK(7, 4) +#define BE_TXD_INFO3_CQI_SND BIT(8) +#define BE_TXD_INFO3_RTT_EN BIT(9) +#define BE_TXD_INFO3_HT_DATA_SND_V1 BIT(10) +#define BE_TXD_INFO3_BT_NULL BIT(11) +#define BE_TXD_INFO3_TRI_FRAME BIT(12) +#define BE_TXD_INFO3_NULL_0 BIT(13) +#define BE_TXD_INFO3_NULL_1 BIT(14) +#define BE_TXD_INFO3_RAW BIT(15) +#define BE_TXD_INFO3_GROUP_BIT_IE_OFFSET GENMASK(23, 16) +#define BE_TXD_INFO3_SIGNALING_TA_PKT_EN BIT(25) +#define BE_TXD_INFO3_BCNPKT_TSF_CTRL BIT(26) +#define BE_TXD_INFO3_SIGNALING_TA_PKT_SC GENMASK(30, 27) +#define BE_TXD_INFO3_FORCE_BSS_CLR BIT(31) + +/* TX WD INFO DWORD 4 */ +#define BE_TXD_INFO4_PUNCTURE_PATTERN GENMASK(15, 0) +#define BE_TXD_INFO4_PUNC_MODE GENMASK(17, 16) +#define BE_TXD_INFO4_SW_TX_OK_0 BIT(18) +#define BE_TXD_INFO4_SW_TX_OK_1 BIT(19) +#define BE_TXD_INFO4_SW_TX_PWR_DBM GENMASK(26, 23) +#define BE_TXD_INFO4_RTS_EN BIT(27) +#define BE_TXD_INFO4_CTS2SELF BIT(28) +#define BE_TXD_INFO4_CCA_RTS GENMASK(30, 29) +#define BE_TXD_INFO4_HW_RTS_EN BIT(31) + +/* TX WD INFO DWORD 5 */ +#define BE_TXD_INFO5_SR_RATE_V1 GENMASK(4, 0) +#define BE_TXD_INFO5_SR_EN_V1 BIT(5) +#define BE_TXD_INFO5_NDPA_DURATION GENMASK(31, 16) + +/* TX WD INFO DWORD 6 */ +#define BE_TXD_INFO6_UL_APEP_LEN GENMASK(11, 0) +#define BE_TXD_INFO6_UL_GI_LTF GENMASK(14, 12) +#define BE_TXD_INFO6_UL_DOPPLER BIT(15) +#define BE_TXD_INFO6_UL_STBC BIT(16) +#define BE_TXD_INFO6_UL_LENGTH_REF GENMASK(21, 18) +#define BE_TXD_INFO6_UL_RF_GAIN_IDX GENMASK(31, 22) + +/* TX WD INFO DWORD 7 */ +#define BE_TXD_INFO7_UL_FIXED_GAIN_EN BIT(0) +#define BE_TXD_INFO7_UL_PRI_EXP_RSSI_DBM GENMASK(7, 1) +#define BE_TXD_INFO7_ELNA_IDX BIT(8) +#define BE_TXD_INFO7_UL_APEP_UNIT GENMASK(10, 9) +#define BE_TXD_INFO7_UL_TRI_PAD GENMASK(13, 11) +#define BE_TXD_INFO7_UL_T_PE 
GENMASK(15, 14) +#define BE_TXD_INFO7_UL_EHT_USR_PRES BIT(16) +#define BE_TXD_INFO7_UL_HELTF_SYMBOL_NUM GENMASK(19, 17) +#define BE_TXD_INFO7_ULBW GENMASK(21, 20) +#define BE_TXD_INFO7_ULBW_EXT GENMASK(23, 22) +#define BE_TXD_INFO7_USE_WD_UL GENMASK(25, 24) +#define BE_TXD_INFO7_EXTEND_MODE_SEL GENMASK(31, 28) + /* RX WD dword0 */ #define AX_RXD_RPKT_LEN_MASK GENMASK(13, 0) #define AX_RXD_SHIFT_MASK GENMASK(15, 14) @@ -269,6 +444,102 @@ struct rtw89_phy_sts_iehdr { #define RTW89_PHY_STS_IEHDR_TYPE GENMASK(4, 0) #define RTW89_PHY_STS_IEHDR_LEN GENMASK(11, 5) +/* BE RXD dword0 */ +#define BE_RXD_RPKT_LEN_MASK GENMASK(13, 0) +#define BE_RXD_SHIFT_MASK GENMASK(15, 14) +#define BE_RXD_DRV_INFO_SZ_MASK GENMASK(19, 18) +#define BE_RXD_HDR_CNV_SZ_MASK GENMASK(21, 20) +#define BE_RXD_PHY_RPT_SZ_MASK GENMASK(23, 22) +#define BE_RXD_RPKT_TYPE_MASK GENMASK(29, 24) +#define BE_RXD_BB_SEL BIT(30) +#define BE_RXD_LONG_RXD BIT(31) + +/* BE RXD dword1 */ +#define BE_RXD_PKT_ID_MASK GENMASK(11, 0) +#define BE_RXD_FWD_TARGET_MASK GENMASK(23, 16) +#define BE_RXD_BCN_FW_INFO_MASK GENMASK(25, 24) +#define BE_RXD_FW_RLS BIT(26) + +/* BE RXD dword2 */ +#define BE_RXD_MAC_ID_MASK GENMASK(7, 0) +#define BE_RXD_TYPE_MASK GENMASK(11, 10) +#define BE_RXD_LAST_MSDU BIT(12) +#define BE_RXD_AMSDU_CUT BIT(13) +#define BE_RXD_ADDR_CAM_VLD BIT(14) +#define BE_RXD_REORDER BIT(15) +#define BE_RXD_SEQ_MASK GENMASK(27, 16) +#define BE_RXD_TID_MASK GENMASK(31, 28) + +/* BE RXD dword3 */ +#define BE_RXD_SEC_TYPE_MASK GENMASK(3, 0) +#define BE_RXD_BIP_KEYID BIT(4) +#define BE_RXD_BIP_ENC BIT(5) +#define BE_RXD_CRC32_ERR BIT(6) +#define BE_RXD_ICV_ERR BIT(7) +#define BE_RXD_HW_DEC BIT(8) +#define BE_RXD_SW_DEC BIT(9) +#define BE_RXD_A1_MATCH BIT(10) +#define BE_RXD_AMPDU BIT(11) +#define BE_RXD_AMPDU_EOF BIT(12) +#define BE_RXD_AMSDU BIT(13) +#define BE_RXD_MC BIT(14) +#define BE_RXD_BC BIT(15) +#define BE_RXD_MD BIT(16) +#define BE_RXD_MF BIT(17) +#define BE_RXD_PWR BIT(18) +#define BE_RXD_QOS BIT(19) +#define BE_RXD_EOSP BIT(20) +#define BE_RXD_HTC BIT(21) +#define BE_RXD_QNULL BIT(22) +#define BE_RXD_A4_FRAME BIT(23) +#define BE_RXD_FRAG_MASK GENMASK(27, 24) +#define BE_RXD_GET_CH_INFO_V1_MASK GENMASK(31, 30) + +/* BE RXD dword4 */ +#define BE_RXD_PPDU_TYPE_MASK GENMASK(7, 0) +#define BE_RXD_PPDU_CNT_MASK GENMASK(10, 8) +#define BE_RXD_BW_MASK GENMASK(14, 12) +#define BE_RXD_RX_GI_LTF_MASK GENMASK(18, 16) +#define BE_RXD_RX_REORDER_FIELD_EN BIT(19) +#define BE_RXD_RX_DATARATE_MASK GENMASK(31, 20) + +/* BE RXD dword5 */ +#define BE_RXD_FREERUN_CNT_MASK GENMASK(31, 0) + +/* BE RXD dword6 */ +#define BE_RXD_ADDR_CAM_MASK GENMASK(7, 0) +#define BE_RXD_SR_EN BIT(13) +#define BE_RXD_NON_SRG_PPDU BIT(14) +#define BE_RXD_INTER_PPDU BIT(15) +#define BE_RXD_USER_ID_MASK GENMASK(21, 16) +#define BE_RXD_RX_STATISTICS BIT(22) +#define BE_RXD_SMART_ANT BIT(23) +#define BE_RXD_SEC_CAM_IDX_MASK GENMASK(31, 24) + +/* BE RXD dword7 */ +#define BE_RXD_PATTERN_IDX_MASK GENMASK(4, 0) +#define BE_RXD_MAGIC_WAKE BIT(5) +#define BE_RXD_UNICAST_WAKE BIT(6) +#define BE_RXD_PATTERN_WAKE BIT(7) +#define BE_RXD_RX_PL_MATCH BIT(8) +#define BE_RXD_RX_PL_ID_MASK GENMASK(15, 12) +#define BE_RXD_HDR_CNV BIT(16) +#define BE_RXD_NAT25_HIT BIT(17) +#define BE_RXD_IS_DA BIT(18) +#define BE_RXD_CHKSUM_OFFLOAD_EN BIT(19) +#define BE_RXD_RXSC_ENTRY_MASK GENMASK(22, 20) +#define BE_RXD_RXSC_HIT BIT(23) +#define BE_RXD_WITH_LLC BIT(24) +#define BE_RXD_RX_AGG_FIELD_EN BIT(25) + +/* BE RXD dword8 */ +#define BE_RXD_MAC_ADDR_MASK GENMASK(31, 0) + +/* BE RXD dword9 */ 
+#define BE_RXD_MAC_ADDR_H_MASK GENMASK(15, 0) +#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16) +#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21) + struct rtw89_phy_sts_ie0 { __le32 w0; __le32 w1; diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index aa9efca040..660bf2ece9 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -488,6 +488,8 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + const struct rtw89_chip_info *chip = rtwdev->chip; + bool include_bb = !!chip->bbmcu_nr; struct ieee80211_sta *wow_sta; struct rtw89_sta *rtwsta = NULL; bool is_conn = true; @@ -501,7 +503,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) else is_conn = false; - ret = rtw89_fw_download(rtwdev, fw_type); + ret = rtw89_fw_download(rtwdev, fw_type, include_bb); if (ret) { rtw89_warn(rtwdev, "download fw failed\n"); return ret; diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c index caa22226b0..a44a7403ce 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.c +++ b/drivers/net/wireless/silabs/wfx/data_tx.c @@ -208,6 +208,36 @@ static bool wfx_is_action_back(struct ieee80211_hdr *hdr) return true; } +struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info; + + if (!skb) + return NULL; + tx_info = IEEE80211_SKB_CB(skb); + return (struct wfx_tx_priv *)tx_info->rate_driver_data; +} + +struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) +{ + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; + + return req; +} + +struct wfx_vif *wfx_skb_wvif(struct wfx_dev *wdev, struct sk_buff *skb) +{ + struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb); + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + + if (tx_priv->vif_id != hif->interface && hif->interface != 2) { + dev_err(wdev->dev, "corrupted skb"); + return wdev_to_wvif(wdev, hif->interface); + } + return wdev_to_wvif(wdev, tx_priv->vif_id); +} + static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr) { @@ -321,6 +351,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct /* Fill tx_priv */ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data; tx_priv->icv_size = wfx_tx_get_icv_len(hw_key); + tx_priv->vif_id = wvif->id; /* Fill hif_msg */ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb"); @@ -331,7 +362,10 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct hif_msg = (struct wfx_hif_msg *)skb->data; hif_msg->len = cpu_to_le16(skb->len); hif_msg->id = HIF_REQ_ID_TX; - hif_msg->interface = wvif->id; + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + hif_msg->interface = 2; + else + hif_msg->interface = wvif->id; if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) { dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n", @@ -352,9 +386,15 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct req->fc_offset = offset; /* Queue index are inverted between firmware and Linux */ req->queue_id = 3 - queue_id; - req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); - req->retry_policy_index = 
wfx_tx_get_retry_policy_id(wvif, tx_info); - req->frame_format = wfx_tx_get_frame_format(tx_info); + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + req->peer_sta_id = HIF_LINK_ID_NOT_ASSOCIATED; + req->retry_policy_index = HIF_TX_RETRY_POLICY_INVALID; + req->frame_format = HIF_FRAME_FORMAT_NON_HT; + } else { + req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); + req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); + req->frame_format = wfx_tx_get_frame_format(tx_info); + } if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI) req->short_gi = 1; if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) @@ -470,7 +510,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg) } tx_info = IEEE80211_SKB_CB(skb); tx_priv = wfx_skb_tx_priv(skb); - wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface); + wvif = wfx_skb_wvif(wdev, skb); WARN_ON(!wvif); if (!wvif) return; @@ -532,7 +572,6 @@ void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, b struct wfx_dev *wdev = hw->priv; struct sk_buff_head dropped; struct wfx_vif *wvif; - struct wfx_hif_msg *hif; struct sk_buff *skb; skb_queue_head_init(&dropped); @@ -548,8 +587,7 @@ void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, b if (wdev->chip_frozen) wfx_pending_drop(wdev, &dropped); while ((skb = skb_dequeue(&dropped)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb)); wfx_skb_dtor(wvif, skb); } diff --git a/drivers/net/wireless/silabs/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h index 983470705e..0621b82103 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.h +++ b/drivers/net/wireless/silabs/wfx/data_tx.h @@ -36,6 +36,7 @@ struct wfx_tx_policy_cache { struct wfx_tx_priv { ktime_t xmit_timestamp; unsigned char icv_size; + unsigned char vif_id; }; void wfx_tx_policy_init(struct wfx_vif *wvif); @@ -45,22 +46,8 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struc void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg); void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); -static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) -{ - struct ieee80211_tx_info *tx_info; - - if (!skb) - return NULL; - tx_info = IEEE80211_SKB_CB(skb); - return (struct wfx_tx_priv *)tx_info->rate_driver_data; -} - -static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) -{ - struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; - struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; - - return req; -} +struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb); +struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb); +struct wfx_vif *wfx_skb_wvif(struct wfx_dev *wdev, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c index 9402503fbd..9f403d275c 100644 --- a/drivers/net/wireless/silabs/wfx/hif_tx.c +++ b/drivers/net/wireless/silabs/wfx/hif_tx.c @@ -45,6 +45,24 @@ static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif) return NULL; } +static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) +{ + int i; + u32 ret = 0; + /* The device only supports 2GHz */ + struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; + + for (i = 0; i < 
sband->n_bitrates; i++) { + if (rates & BIT(i)) { + if (i >= sband->n_bitrates) + dev_warn(wdev->dev, "unsupported basic rate\n"); + else + ret |= BIT(sband->bitrates[i].hw_value); + } + } + return ret; +} + int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, void *reply, size_t reply_len, bool no_reply) { @@ -220,6 +238,31 @@ int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, s return ret; } +/* Hijack scan request to implement Remain-On-Channel */ +int wfx_hif_scan_uniq(struct wfx_vif *wvif, struct ieee80211_channel *chan, int duration) +{ + int ret; + struct wfx_hif_msg *hif; + size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + sizeof(u8); + struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->num_of_ssids = HIF_API_MAX_NB_SSIDS; + body->maintain_current_bss = 1; + body->disallow_ps = 1; + body->tx_power_level = cpu_to_le32(chan->max_power); + body->num_of_channels = 1; + body->channel_list[0] = chan->hw_value; + body->max_transmit_rate = API_RATE_INDEX_B_1MBPS; + body->min_channel_time = cpu_to_le32(duration); + body->max_channel_time = cpu_to_le32(duration * 110 / 100); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int chan_start_idx, int chan_num) { diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h index 71817a6571..aab54df6aa 100644 --- a/drivers/net/wireless/silabs/wfx/hif_tx.h +++ b/drivers/net/wireless/silabs/wfx/hif_tx.h @@ -54,6 +54,7 @@ int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable); int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len); int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, int chan_start, int chan_num); +int wfx_hif_scan_uniq(struct wfx_vif *wvif, struct ieee80211_channel *chan, int duration); int wfx_hif_stop_scan(struct wfx_vif *wvif); int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len); int wfx_hif_shutdown(struct wfx_dev *wdev); diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index ede822d771..e7198520bd 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -151,6 +151,8 @@ static const struct ieee80211_ops wfx_ops = { .change_chanctx = wfx_change_chanctx, .assign_vif_chanctx = wfx_assign_vif_chanctx, .unassign_vif_chanctx = wfx_unassign_vif_chanctx, + .remain_on_channel = wfx_remain_on_channel, + .cancel_remain_on_channel = wfx_cancel_remain_on_channel, }; bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor) @@ -246,6 +248,7 @@ static void wfx_free_common(void *data) mutex_destroy(&wdev->tx_power_loop_info_lock); mutex_destroy(&wdev->rx_stats_lock); + mutex_destroy(&wdev->scan_lock); mutex_destroy(&wdev->conf_mutex); ieee80211_free_hw(wdev->hw); } @@ -288,6 +291,7 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + hw->wiphy->max_remain_on_channel_duration = 5000; hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX; hw->wiphy->max_scan_ssids = 2; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; @@ -314,6 +318,7 @@ struct wfx_dev *wfx_init_common(struct 
device *dev, const struct wfx_platform_da gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); mutex_init(&wdev->conf_mutex); + mutex_init(&wdev->scan_lock); mutex_init(&wdev->rx_stats_lock); mutex_init(&wdev->tx_power_loop_info_lock); init_completion(&wdev->firmware_ready); diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c index 37f492e5d3..e61b86f211 100644 --- a/drivers/net/wireless/silabs/wfx/queue.c +++ b/drivers/net/wireless/silabs/wfx/queue.c @@ -68,13 +68,16 @@ void wfx_tx_queues_init(struct wfx_vif *wvif) for (i = 0; i < IEEE80211_NUM_ACS; ++i) { skb_queue_head_init(&wvif->tx_queue[i].normal); skb_queue_head_init(&wvif->tx_queue[i].cab); + skb_queue_head_init(&wvif->tx_queue[i].offchan); wvif->tx_queue[i].priority = priorities[i]; } } bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) { - return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab); + return skb_queue_empty_lockless(&queue->normal) && + skb_queue_empty_lockless(&queue->cab) && + skb_queue_empty_lockless(&queue->offchan); } void wfx_tx_queues_check_empty(struct wfx_vif *wvif) @@ -103,8 +106,9 @@ static void __wfx_tx_queue_drop(struct wfx_vif *wvif, void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, struct sk_buff_head *dropped) { - __wfx_tx_queue_drop(wvif, &queue->cab, dropped); __wfx_tx_queue_drop(wvif, &queue->normal, dropped); + __wfx_tx_queue_drop(wvif, &queue->cab, dropped); + __wfx_tx_queue_drop(wvif, &queue->offchan, dropped); wake_up(&wvif->wdev->tx_dequeue); } @@ -113,7 +117,9 @@ void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb) struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + skb_queue_tail(&queue->offchan, skb); + else if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) skb_queue_tail(&queue->cab, skb); else skb_queue_tail(&queue->normal, skb); @@ -123,13 +129,11 @@ void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped) { struct wfx_queue *queue; struct wfx_vif *wvif; - struct wfx_hif_msg *hif; struct sk_buff *skb; WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); if (wvif) { queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; WARN_ON(skb_get_queue_mapping(skb) > 3); @@ -155,7 +159,7 @@ struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id) if (req->packet_id != packet_id) continue; spin_unlock_bh(&wdev->tx_pending.lock); - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); if (wvif) { queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; WARN_ON(skb_get_queue_mapping(skb) > 3); @@ -246,6 +250,26 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) } } + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + for (i = 0; i < num_queues; i++) { + skb = skb_dequeue(&queues[i]->offchan); + if (!skb) + continue; + hif = (struct wfx_hif_msg *)skb->data; + /* Offchan frames are assigned to a special interface. + * The only interface allowed to send data during scan. 
+ */ + WARN_ON(hif->interface != 2); + atomic_inc(&queues[i]->pending_frames); + trace_queues_stats(wdev, queues[i]); + return skb; + } + } + + if (mutex_is_locked(&wdev->scan_lock)) + return NULL; + wvif = NULL; while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { if (!wvif->after_dtim_tx_allowed) diff --git a/drivers/net/wireless/silabs/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h index 4731debca9..6857fbd60f 100644 --- a/drivers/net/wireless/silabs/wfx/queue.h +++ b/drivers/net/wireless/silabs/wfx/queue.h @@ -17,6 +17,7 @@ struct wfx_vif; struct wfx_queue { struct sk_buff_head normal; struct sk_buff_head cab; /* Content After (DTIM) Beacon */ + struct sk_buff_head offchan; atomic_t pending_frames; int priority; }; diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c index 16f619ed22..c3c103ff88 100644 --- a/drivers/net/wireless/silabs/wfx/scan.c +++ b/drivers/net/wireless/silabs/wfx/scan.c @@ -95,7 +95,7 @@ void wfx_hw_scan_work(struct work_struct *work) int chan_cur, ret, err; mutex_lock(&wvif->wdev->conf_mutex); - mutex_lock(&wvif->scan_lock); + mutex_lock(&wvif->wdev->scan_lock); if (wvif->join_in_progress) { dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); wfx_reset(wvif); @@ -116,7 +116,7 @@ void wfx_hw_scan_work(struct work_struct *work) ret = -ETIMEDOUT; } } while (ret >= 0 && chan_cur < hw_req->req.n_channels); - mutex_unlock(&wvif->scan_lock); + mutex_unlock(&wvif->wdev->scan_lock); mutex_unlock(&wvif->wdev->conf_mutex); wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0); } @@ -145,3 +145,65 @@ void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done) wvif->scan_nb_chan_done = nb_chan_done; complete(&wvif->scan_complete); } + +void wfx_remain_on_channel_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, remain_on_channel_work); + struct ieee80211_channel *chan = wvif->remain_on_channel_chan; + int duration = wvif->remain_on_channel_duration; + int ret; + + /* Hijack scan request to implement Remain-On-Channel */ + mutex_lock(&wvif->wdev->conf_mutex); + mutex_lock(&wvif->wdev->scan_lock); + if (wvif->join_in_progress) { + dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); + wfx_reset(wvif); + } + wfx_tx_flush(wvif->wdev); + + reinit_completion(&wvif->scan_complete); + ret = wfx_hif_scan_uniq(wvif, chan, duration); + if (ret) + goto end; + ieee80211_ready_on_channel(wvif->wdev->hw); + ret = wait_for_completion_timeout(&wvif->scan_complete, + msecs_to_jiffies(duration * 120 / 100)); + if (!ret) { + wfx_hif_stop_scan(wvif); + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + dev_dbg(wvif->wdev->dev, "roc timeout\n"); + } + if (!ret) + dev_err(wvif->wdev->dev, "roc didn't stop\n"); + ieee80211_remain_on_channel_expired(wvif->wdev->hw); +end: + mutex_unlock(&wvif->wdev->scan_lock); + mutex_unlock(&wvif->wdev->conf_mutex); + wfx_bh_request_tx(wvif->wdev); +} + +int wfx_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration, + enum ieee80211_roc_type type) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + if (wfx_api_older_than(wdev, 3, 10)) + return -EOPNOTSUPP; + + wvif->remain_on_channel_duration = duration; + wvif->remain_on_channel_chan = chan; + schedule_work(&wvif->remain_on_channel_work); + return 0; +} + +int wfx_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif 
*)vif->drv_priv; + + wfx_hif_stop_scan(wvif); + flush_work(&wvif->remain_on_channel_work); + return 0; +} diff --git a/drivers/net/wireless/silabs/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h index 78e3b984f3..995ab8c6cb 100644 --- a/drivers/net/wireless/silabs/wfx/scan.h +++ b/drivers/net/wireless/silabs/wfx/scan.h @@ -19,4 +19,10 @@ int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done); +void wfx_remain_on_channel_work(struct work_struct *work); +int wfx_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration, + enum ieee80211_roc_type type); +int wfx_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + #endif diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index 626dfb4b7a..537caf9d91 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -20,24 +20,6 @@ #define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) -{ - int i; - u32 ret = 0; - /* The device only supports 2GHz */ - struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; - - for (i = 0; i < sband->n_bitrates; i++) { - if (rates & BIT(i)) { - if (i >= sband->n_bitrates) - dev_warn(wdev->dev, "unsupported basic rate\n"); - else - ret |= BIT(sband->bitrates[i].hw_value); - } - } - return ret; -} - void wfx_cooling_timeout_work(struct work_struct *work) { struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev, @@ -114,10 +96,12 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROBE_REQ | FIF_PSPOLL; + /* Filters are ignored during the scan. No frames are filtered. 
*/ + if (mutex_is_locked(&wdev->scan_lock)) + return; + mutex_lock(&wdev->conf_mutex); while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - mutex_lock(&wvif->scan_lock); - /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and * beacons from other BSS */ @@ -144,8 +128,6 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, else filter_prbreq = true; wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq); - - mutex_unlock(&wvif->scan_lock); } mutex_unlock(&wdev->conf_mutex); } @@ -354,29 +336,38 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif) return 0; } -static void wfx_set_mfp_ap(struct wfx_vif *wvif) +static int wfx_set_mfp_ap(struct wfx_vif *wvif) { struct ieee80211_vif *vif = wvif_to_vif(wvif); struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif, 0); const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); - const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, - skb->len - ieoffset); const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16); const int pairwise_cipher_suite_size = 4 / sizeof(u16); const int akm_suite_size = 4 / sizeof(u16); + const u16 *ptr; - if (ptr) { - ptr += pairwise_cipher_suite_count_offset; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - ptr += 1 + pairwise_cipher_suite_size * *ptr; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - ptr += 1 + akm_suite_size * *ptr; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); - } + if (unlikely(!skb)) + return -ENOMEM; + + ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, + skb->len - ieoffset); + if (unlikely(!ptr)) + return -EINVAL; + + ptr += pairwise_cipher_suite_count_offset; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return -EINVAL; + + ptr += 1 + pairwise_cipher_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return -EINVAL; + + ptr += 1 + akm_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return -EINVAL; + + wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); + return 0; } int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -394,15 +385,19 @@ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel); if (ret > 0) return -EIO; - wfx_set_mfp_ap(wvif); - return ret; + return wfx_set_mfp_ap(wvif); } void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_dev *wdev = wvif->wdev; + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); + wvif = (struct wfx_vif *)vif->drv_priv; wfx_reset(wvif); } @@ -634,18 +629,14 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd) { - struct wfx_vif *wvif_it; - if (notify_cmd != STA_NOTIFY_AWAKE) return; /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to * skip this DTIM and wait for the next one. 
*/ - wvif_it = NULL; - while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL) - if (mutex_is_locked(&wvif_it->scan_lock)) - return; + if (mutex_is_locked(&wvif->wdev->scan_lock)) + return; if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed) dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)", @@ -743,9 +734,9 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) complete(&wvif->set_pm_mode_complete); INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); - mutex_init(&wvif->scan_lock); init_completion(&wvif->scan_complete); INIT_WORK(&wvif->scan_work, wfx_hw_scan_work); + INIT_WORK(&wvif->remain_on_channel_work, wfx_remain_on_channel_work); wfx_tx_queues_init(wvif); wfx_tx_policy_init(wvif); diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h index 888db5cd32..c478ddcb93 100644 --- a/drivers/net/wireless/silabs/wfx/sta.h +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -66,6 +66,5 @@ int wfx_update_pm(struct wfx_vif *wvif); /* Other Helpers */ void wfx_reset(struct wfx_vif *wvif); -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates); #endif diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h index 13ba84b3b2..bd0df2e1ea 100644 --- a/drivers/net/wireless/silabs/wfx/wfx.h +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -43,6 +43,7 @@ struct wfx_dev { struct delayed_work cooling_timeout_work; bool poll_irq; bool chip_frozen; + struct mutex scan_lock; struct mutex conf_mutex; struct wfx_hif_cmd hif_cmd; @@ -69,6 +70,7 @@ struct wfx_vif { bool after_dtim_tx_allowed; bool join_in_progress; + struct completion set_pm_mode_complete; struct delayed_work beacon_loss_work; @@ -80,15 +82,15 @@ struct wfx_vif { unsigned long uapsd_mask; - /* avoid some operations in parallel with scan */ - struct mutex scan_lock; struct work_struct scan_work; struct completion scan_complete; int scan_nb_chan_done; bool scan_abort; struct ieee80211_scan_request *scan_req; - struct completion set_pm_mode_complete; + struct ieee80211_channel *remain_on_channel_chan; + int remain_on_channel_duration; + struct work_struct remain_on_channel_work; }; static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif) diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index 6894b919ff..084d52b11f 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -994,7 +994,7 @@ void cw1200_skb_dtor(struct cw1200_common *priv, txpriv->raw_link_id, txpriv->tid); tx_policy_put(priv, txpriv->rate_id); } - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } void cw1200_rx_cb(struct cw1200_common *priv, @@ -1166,7 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); - if (tim_ie) { + if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) { struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2]; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index eded284af6..cd9a41f59f 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -404,7 +404,7 @@ static int wl1251_op_start(struct ieee80211_hw *hw) /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip_id; - strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version)); + strscpy(wiphy->fw_version, 
wl->fw_ver, sizeof(wiphy->fw_version)); out: if (ret < 0) diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index e9dc3c72bb..474b603c12 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -434,7 +434,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl, result->status, wl1251_tx_parse_status(result->status)); - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); wl->tx_frames[result->id] = NULL; } @@ -566,7 +566,7 @@ void wl1251_tx_flush(struct wl1251 *wl) if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) continue; - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); } for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) @@ -577,7 +577,7 @@ void wl1251_tx_flush(struct wl1251 *wl) if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) continue; - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); wl->tx_frames[i] = NULL; } } diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index d06a2c4194..de045fe4ca 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -1919,7 +1919,7 @@ out: return ret; } -static int wl12xx_remove(struct platform_device *pdev) +static void wl12xx_remove(struct platform_device *pdev) { struct wl1271 *wl = platform_get_drvdata(pdev); struct wl12xx_priv *priv; @@ -1928,7 +1928,7 @@ static int wl12xx_remove(struct platform_device *pdev) kfree(priv->rx_mem_addr); - return wlcore_remove(pdev); + wlcore_remove(pdev); } static const struct platform_device_id wl12xx_id_table[] = { @@ -1939,7 +1939,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table); static struct platform_driver wl12xx_driver = { .probe = wl12xx_probe, - .remove = wl12xx_remove, + .remove_new = wl12xx_remove, .id_table = wl12xx_id_table, .driver = { .name = "wl12xx_driver", diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 0b3cf8477c..20d9181b34 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -1516,12 +1516,9 @@ static int wl18xx_handle_static_data(struct wl1271 *wl, struct wl18xx_static_data_priv *static_data_priv = (struct wl18xx_static_data_priv *) static_data->priv; - strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version, + strscpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version, sizeof(wl->chip.phy_fw_ver_str)); - /* make sure the string is NULL-terminated */ - wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0'; - wl1271_info("PHY firmware version: %s", static_data_priv->phy_version); return 0; @@ -2033,7 +2030,7 @@ MODULE_DEVICE_TABLE(platform, wl18xx_id_table); static struct platform_driver wl18xx_driver = { .probe = wl18xx_probe, - .remove = wlcore_remove, + .remove_new = wlcore_remove, .id_table = wl18xx_id_table, .driver = { .name = "wl18xx_driver", diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index 85abd0a2d1..f481c2e3db 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -41,12 +41,9 @@ static int wlcore_boot_parse_fw_ver(struct wl1271 *wl, { int ret; - strncpy(wl->chip.fw_ver_str, static_data->fw_version, + strscpy(wl->chip.fw_ver_str, static_data->fw_version, sizeof(wl->chip.fw_ver_str)); - /* make sure the string is NULL-terminated */ - wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; - ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", 
&wl->chip.fw_ver[0], &wl->chip.fw_ver[1], &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 46ab69eab2..1e082d039b 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -229,7 +229,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl, vif = wl12xx_wlvif_to_vif(wlvif); if (wlvif->bss_type == BSS_TYPE_STA_BSS) { - ieee80211_chswitch_done(vif, success); + ieee80211_chswitch_done(vif, success, 0); cancel_delayed_work(&wlvif->channel_switch_work); } else { set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index bf21611872..fb9ed97774 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1126,7 +1126,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode) /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; - strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); goto out; @@ -2043,7 +2043,7 @@ static void wlcore_channel_switch_work(struct work_struct *work) goto out; vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) @@ -2344,7 +2344,7 @@ power_off: /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; - strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); /* @@ -3030,7 +3030,7 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif) struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); wl12xx_cmd_stop_channel_switch(wl, wlvif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); cancel_delayed_work(&wlvif->channel_switch_work); } @@ -5451,7 +5451,7 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, if (unlikely(wl->state == WLCORE_STATE_OFF)) { if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); goto out; } else if (unlikely(wl->state != WLCORE_STATE_ON)) { goto out; @@ -6737,7 +6737,7 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) } EXPORT_SYMBOL_GPL(wlcore_probe); -int wlcore_remove(struct platform_device *pdev) +void wlcore_remove(struct platform_device *pdev) { struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); struct wl1271 *wl = platform_get_drvdata(pdev); @@ -6752,7 +6752,7 @@ int wlcore_remove(struct platform_device *pdev) if (pdev_data->family && pdev_data->family->nvs_name) wait_for_completion(&wl->nvs_loading_complete); if (!wl->initialized) - return 0; + return; if (wl->wakeirq >= 0) { dev_pm_clear_wake_irq(wl->dev); @@ -6772,8 +6772,6 @@ int wlcore_remove(struct platform_device *pdev) free_irq(wl->irq, wl); wlcore_free_hw(wl); - - return 0; } EXPORT_SYMBOL_GPL(wlcore_remove); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 81c94d3906..1f8511bf9b 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -497,7 +497,7 @@ struct wl1271 { }; int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); -int wlcore_remove(struct platform_device *pdev); +void wlcore_remove(struct platform_device 
*pdev); struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, u32 mbox_size); int wlcore_free_hw(struct wl1271 *wl); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index f5a0880da3..c7b4414cc6 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -72,15 +72,6 @@ MODULE_PARM_DESC(mlo, "Support MLO"); /** * enum hwsim_regtest - the type of regulatory tests we offer * - * These are the different values you can use for the regtest - * module parameter. This is useful to help test world roaming - * and the driver regulatory_hint() call and combinations of these. - * If you want to do specific alpha2 regulatory domain tests simply - * use the userspace regulatory request as that will be respected as - * well without the need of this module parameter. This is designed - * only for testing the driver regulatory request, world roaming - * and all possible combinations. - * * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, * this is the default value. * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory @@ -125,6 +116,15 @@ MODULE_PARM_DESC(mlo, "Support MLO"); * domain request * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio * regulatory requests. + * + * These are the different values you can use for the regtest + * module parameter. This is useful to help test world roaming + * and the driver regulatory_hint() call and combinations of these. + * If you want to do specific alpha2 regulatory domain tests simply + * use the userspace regulatory request as that will be respected as + * well without the need of this module parameter. This is designed + * only for testing the driver regulatory request, world roaming + * and all possible combinations. 
*/ enum hwsim_regtest { HWSIM_REGTEST_DISABLED = 0, @@ -2445,6 +2445,14 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw, vp->assoc = vif->cfg.assoc; vp->aid = vif->cfg.aid; } + + if (vif->type == NL80211_IFTYPE_STATION && + changed & BSS_CHANGED_MLD_VALID_LINKS) { + u16 usable_links = ieee80211_vif_usable_links(vif); + + if (vif->active_links != usable_links) + ieee80211_set_active_links_async(vif, usable_links); + } } static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw, @@ -4899,25 +4907,19 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband) { - u16 n_iftype_data; - - if (sband->band == NL80211_BAND_2GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_2ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_2ghz; - } else if (sband->band == NL80211_BAND_5GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_5ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_5ghz; - } else if (sband->band == NL80211_BAND_6GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_6ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_6ghz; - } else { - return; + switch (sband->band) { + case NL80211_BAND_2GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_2ghz); + break; + case NL80211_BAND_5GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_5ghz); + break; + case NL80211_BAND_6GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_6ghz); + break; + default: + break; } - - sband->n_iftype_data = n_iftype_data; } #ifdef CONFIG_MAC80211_MESH diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h index 92126f02c5..4676cdaf4c 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.h +++ b/drivers/net/wireless/virtual/mac80211_hwsim.h @@ -3,7 +3,7 @@ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen * Copyright (c) 2011, Javier Lopez - * Copyright (C) 2020, 2022 Intel Corporation + * Copyright (C) 2020, 2022-2023 Intel Corporation */ #ifndef __MAC80211_HWSIM_H @@ -86,7 +86,7 @@ enum hwsim_tx_control_flags { * with %HWSIM_CMD_REPORT_PMSR. * @__HWSIM_CMD_MAX: enum limit */ -enum { +enum hwsim_commands { HWSIM_CMD_UNSPEC, HWSIM_CMD_REGISTER, HWSIM_CMD_FRAME, @@ -117,11 +117,11 @@ enum { * the frame was broadcasted from * @HWSIM_ATTR_FRAME: Data array * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process - properly the frame at user space + * properly the frame at user space * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame at user - space + * space * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame at user - space + * space * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO @@ -140,6 +140,7 @@ enum { * command to force radio removal when process that created the radio dies * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. + * @HWSIM_ATTR_PAD: padding attribute for 64-bit values, ignore * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. 
* @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding * rates of %HWSIM_ATTR_TX_INFO @@ -156,9 +157,7 @@ enum { * to provide peer measurement result (nl80211_peer_measurement_attrs) * @__HWSIM_ATTR_MAX: enum limit */ - - -enum { +enum hwsim_attrs { HWSIM_ATTR_UNSPEC, HWSIM_ATTR_ADDR_RECEIVER, HWSIM_ATTR_ADDR_TRANSMITTER, @@ -259,7 +258,7 @@ enum hwsim_tx_rate_flags { * struct hwsim_tx_rate - rate selection/status * * @idx: rate index to attempt to send with - * @count: number of tries in this rate before going to the next rate + * @flags: the rate flags according to &enum hwsim_tx_rate_flags * * A value of -1 for @idx indicates an invalid rate and, if used * in an array of retry rates, that no more rates should be tried. @@ -287,7 +286,7 @@ struct hwsim_tx_rate_flag { * @HWSIM_VQ_RX: receive frames and transmission info reports * @HWSIM_NUM_VQS: enum limit */ -enum { +enum hwsim_vqs { HWSIM_VQ_TX, HWSIM_VQ_RX, HWSIM_NUM_VQS, diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h index e77084e767..fdc211bbed 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h @@ -51,7 +51,7 @@ struct ipc_chnl_cfg { /** * ipc_chnl_cfg_get - Get pipe configuration. * @chnl_cfg: Array of ipc_chnl_cfg struct - * @index: Channel index (upto MAX_CHANNELS) + * @index: Channel index (up to MAX_CHANNELS) * * Return: 0 on success and failure value on error */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index 026c5bd0f9..6bd0290e8b 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -36,8 +36,8 @@ /** * ipc_imem_sys_port_open - Open a port link to CP. * @ipc_imem: Imem instance. - * @chl_id: Channel Indentifier. - * @hp_id: HP Indentifier. + * @chl_id: Channel Identifier. + * @hp_id: HP Identifier. * * Return: channel instance on success, NULL for failure */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index 17ca8d1f93..db5f1f9ebf 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -432,7 +432,7 @@ int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr); int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr); /** - * ipc_mux_get_max_sessions - Retuns the maximum sessions supported on the + * ipc_mux_get_max_sessions - Returns the maximum sessions supported on the * provided MUX instance.. 
* @ipc_mux: Pointer to MUX data-struct * diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.h b/drivers/net/wwan/iosm/iosm_ipc_pm.h index e7c00f388c..5f14d7932a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pm.h +++ b/drivers/net/wwan/iosm/iosm_ipc_pm.h @@ -172,7 +172,7 @@ bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm); bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm); /** - * ipc_pm_wait_for_device_active - Wait upto IPC_PM_ACTIVE_TIMEOUT_MS ms + * ipc_pm_wait_for_device_active - Wait up to IPC_PM_ACTIVE_TIMEOUT_MS ms * for the device to reach active state * @ipc_pm: Pointer to power management component * diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.h b/drivers/net/wwan/iosm/iosm_ipc_port.h index 11bc8ed216..d33c52aebf 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_port.h +++ b/drivers/net/wwan/iosm/iosm_ipc_port.h @@ -18,7 +18,7 @@ * @pcie: PCIe component * @port_type: WWAN port type * @channel: Channel instance - * @chl_id: Channel Indentifier + * @chl_id: Channel Identifier */ struct iosm_cdev { struct wwan_port *iosm_port; diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h index 5ebe779058..3e7c7f163e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_trace.h +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h @@ -29,7 +29,7 @@ enum trace_ctrl_mode { * @ipc_imem: Imem instance * @dev: Pointer to device struct * @channel: Channel instance - * @chl_id: Channel Indentifier + * @chl_id: Channel Identifier * @trc_mutex: Mutex used for read and write mode * @mode: Mode for enable and disable trace */ diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c index 86b60aadfa..26756ff0e4 100644 --- a/drivers/net/wwan/rpmsg_wwan_ctrl.c +++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c @@ -37,7 +37,7 @@ static int rpmsg_wwan_ctrl_start(struct wwan_port *port) .dst = RPMSG_ADDR_ANY, }; - strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE); + strscpy(chinfo.name, rpwwan->rpdev->id.name, sizeof(chinfo.name)); rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback, rpwwan, chinfo); if (!rpwwan->ept) diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index f4ff2198b5..210d84c67e 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -852,7 +852,7 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget) if (!ret) { napi_complete_done(napi, work_done); rxq->sleep_lock_pending = true; - napi_reschedule(napi); + napi_schedule(napi); return work_done; } diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 80edb8e75a..0bc9743021 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -445,7 +445,8 @@ int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state ev return -EINVAL; } - event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + event = kmalloc(struct_size(event, data, length), + in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); if (!event) return -ENOMEM; diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h index b6e76f3903..b0b3662ae6 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -102,7 +102,7 @@ struct t7xx_fsm_event { struct list_head entry; enum t7xx_fsm_event_state event_id; unsigned int length; - unsigned char data[]; + unsigned char data[] __counted_by(length); }; struct t7xx_fsm_command { diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 284ab1f563..72e01e550a 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2021, Linaro Ltd */ +#include #include #include #include @@ -301,7 +302,7 @@ static void wwan_remove_dev(struct wwan_device *wwandev) static const struct { const char * const name; /* Port type name */ - const char * const devsuf; /* Port devce name suffix */ + const char * const devsuf; /* Port device name suffix */ } wwan_port_types[WWAN_PORT_MAX + 1] = { [WWAN_PORT_AT] = { .name = "AT", @@ -395,7 +396,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) char buf[0x20]; int id; - idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL); + idmap = bitmap_zalloc(max_ports, GFP_KERNEL); if (!idmap) return -ENOMEM; @@ -414,7 +415,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) /* Allocate unique id */ id = find_first_zero_bit(idmap, max_ports); - free_page((unsigned long)idmap); + bitmap_free(idmap); snprintf(buf, sizeof(buf), fmt, id); /* Name generation */ @@ -1183,7 +1184,7 @@ void wwan_unregister_ops(struct device *parent) */ put_device(&wwandev->dev); - rtnl_lock(); /* Prevent concurent netdev(s) creation/destroying */ + rtnl_lock(); /* Prevent concurrent netdev(s) creation/destroying */ /* Remove all child netdev(s), using batch removing */ device_for_each_child(&wwandev->dev, &kill_list, diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fc3bb63b9a..7cff90aa8d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -252,6 +252,9 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) skb_clear_hash(skb); + /* timestamp packet in software */ + skb_tx_timestamp(skb); + if (!xenvif_rx_queue_tail(queue, skb)) goto drop; @@ -458,7 +461,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) static const struct ethtool_ops xenvif_ethtool_ops = { .get_link = ethtool_op_get_link, - + .get_ts_info = ethtool_op_get_ts_info, .get_sset_count = xenvif_get_sset_count, .get_ethtool_stats = xenvif_get_ethtool_stats, .get_strings = xenvif_get_strings, @@ -668,8 +671,7 @@ err: static void xenvif_disconnect_queue(struct xenvif_queue *queue) { if (queue->task) { - kthread_stop(queue->task); - put_task_struct(queue->task); + kthread_stop_put(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index d7503aef59..fab361a250 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -104,13 +104,12 @@ bool provides_xdp_headroom = true; module_param(provides_xdp_headroom, bool, 0644); static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, - u8 status); + s8 status); static void make_tx_response(struct 
 static void make_tx_response(struct xenvif_queue *queue,
-			     struct xen_netif_tx_request *txp,
+			     const struct xen_netif_tx_request *txp,
 			     unsigned int extra_count,
-			     s8 st);
-static void push_tx_responses(struct xenvif_queue *queue);
+			     s8 status);
 
 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 			  unsigned int extra_count, RING_IDX end)
 {
 	RING_IDX cons = queue->tx.req_cons;
-	unsigned long flags;
 
 	do {
-		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
-		push_tx_responses(queue);
-		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
 			break;
 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -465,12 +460,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
 	     nr_slots--) {
 		if (unlikely(!txp->size)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&queue->response_lock, flags);
 			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
-			push_tx_responses(queue);
-			spin_unlock_irqrestore(&queue->response_lock, flags);
 			++txp;
 			continue;
 		}
@@ -496,14 +486,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     ++txp) {
 			if (unlikely(!txp->size)) {
-				unsigned long flags;
-
-				spin_lock_irqsave(&queue->response_lock, flags);
 				make_tx_response(queue, txp, 0,
 						 XEN_NETIF_RSP_OKAY);
-				push_tx_responses(queue);
-				spin_unlock_irqrestore(&queue->response_lock,
-						       flags);
 				continue;
 			}
 
@@ -995,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 					 (ret == 0) ?
 					 XEN_NETIF_RSP_OKAY :
 					 XEN_NETIF_RSP_ERROR);
-			push_tx_responses(queue);
 			continue;
 		}
 
@@ -1007,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 			make_tx_response(queue, &txreq, extra_count,
 					 XEN_NETIF_RSP_OKAY);
-			push_tx_responses(queue);
 			continue;
 		}
 
@@ -1433,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 	return work_done;
 }
 
+static void _make_tx_response(struct xenvif_queue *queue,
+			      const struct xen_netif_tx_request *txp,
+			      unsigned int extra_count,
+			      s8 status)
+{
+	RING_IDX i = queue->tx.rsp_prod_pvt;
+	struct xen_netif_tx_response *resp;
+
+	resp = RING_GET_RESPONSE(&queue->tx, i);
+	resp->id = txp->id;
+	resp->status = status;
+
+	while (extra_count-- != 0)
+		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+	queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
+}
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
-			       u8 status)
+			       s8 status)
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
@@ -1444,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
-	make_tx_response(queue, &pending_tx_info->req,
-			 pending_tx_info->extra_count, status);
+	_make_tx_response(queue, &pending_tx_info->req,
+			  pending_tx_info->extra_count, status);
 
 	/* Release the pending index before pusing the Tx response so
 	 * its available before a new Tx request is pushed by the
@@ -1459,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 	spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
-
 static void make_tx_response(struct xenvif_queue *queue,
-			     struct xen_netif_tx_request *txp,
+			     const struct xen_netif_tx_request *txp,
 			     unsigned int extra_count,
-			     s8 st)
+			     s8 status)
 {
-	RING_IDX i = queue->tx.rsp_prod_pvt;
-	struct xen_netif_tx_response *resp;
-
-	resp = RING_GET_RESPONSE(&queue->tx, i);
-	resp->id = txp->id;
-	resp->status = st;
-
-	while (extra_count-- != 0)
-		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+	unsigned long flags;
 
-	queue->tx.rsp_prod_pvt = ++i;
-}
+	spin_lock_irqsave(&queue->response_lock, flags);
 
-static void push_tx_responses(struct xenvif_queue *queue)
-{
-	int notify;
+	_make_tx_response(queue, txp, extra_count, status);
+	push_tx_responses(queue);
 
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
+	spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
-- 
cgit v1.2.3
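
Editor's note, not part of the patch: the t7xx hunks above replace open-coded "sizeof(*event) + length" sizing with struct_size() and annotate the flexible array member with __counted_by(). A minimal stand-alone sketch of that pattern follows; demo_event and demo_event_alloc are made-up names used only for illustration.

/*
 * Illustrative sketch only. struct_size() computes the header-plus-payload
 * allocation size with overflow checking, and __counted_by(length) lets the
 * compiler and fortify-source checks bound accesses to data[] by @length.
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_event {
	unsigned int length;
	unsigned char data[] __counted_by(length);
};

static struct demo_event *demo_event_alloc(const void *payload,
					    unsigned int length, gfp_t gfp)
{
	struct demo_event *event;

	event = kmalloc(struct_size(event, data, length), gfp);
	if (!event)
		return NULL;

	event->length = length;	/* set the counter before filling data[] */
	memcpy(event->data, payload, length);
	return event;
}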
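
Likewise, the wwan_core hunks swap a zeroed page reinterpreted as a bitmap for bitmap_zalloc()/bitmap_free(), which allocate exactly the number of bits requested. A hedged sketch of that id-allocation idiom, with an invented helper name (demo_pick_free_id is not a kernel function):

/*
 * Illustrative sketch only: allocate a bitmap, find a free bit, release it.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_pick_free_id(unsigned int max_ids)
{
	unsigned long *idmap;
	int id;

	idmap = bitmap_zalloc(max_ids, GFP_KERNEL);	/* was get_zeroed_page() */
	if (!idmap)
		return -ENOMEM;

	/* a real user would mark the ids already in use here */

	id = find_first_zero_bit(idmap, max_ids);
	bitmap_free(idmap);				/* was free_page() */

	return id < max_ids ? id : -ENOSPC;
}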